/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "function.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "libfuncs.h"
#include "recog.h"
#include "reload.h"
#include "ggc.h"
#include "real.h"
#include "basic-block.h"
#include "target.h"
/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */

optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];

/* Tables of patterns for converting one mode to another.  */
convert_optab convert_optab_table[CTI_MAX];

/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];

#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */
enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif

/* Indexed by the machine mode, gives the insn code for vector conditional
   operation.  */

enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];

/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;
static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
                          int);
static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
                              enum machine_mode *, int *,
                              enum can_compare_purpose);
static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
                                 int *);
static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
static optab new_optab (void);
static convert_optab new_convert_optab (void);
static inline optab init_optab (enum rtx_code);
static inline optab init_optabv (enum rtx_code);
static inline convert_optab init_convert_optab (enum rtx_code);
static void init_libfuncs (optab, int, int, const char *, int);
static void init_integral_libfuncs (optab, const char *, int);
static void init_floating_libfuncs (optab, const char *, int);
static void init_interclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, enum mode_class);
static void init_intraclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, bool);
static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
                                      enum rtx_code, int, rtx);
static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
                                   enum machine_mode *, int *);
static rtx widen_clz (enum machine_mode, rtx, rtx);
static rtx expand_parity (enum machine_mode, rtx, rtx);
static enum rtx_code get_rtx_code (enum tree_code, bool);
static rtx vector_compare_rtx (tree, bool, enum insn_code);

#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
#endif
/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
        {
          if (reg_set_p (target, insn))
            return 0;

          insn = PREV_INSN (insn);
        }
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
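
/* Illustrative sketch (not part of the original file): why NO_EXTEND is
   safe for logical operations.  Assumes 8-bit operands "widened" to a
   32-bit word; the helper name is made up for the example.  If only the
   low 8 bits of the result are kept, garbage in the high bits of the
   widened inputs cannot affect them, which is exactly the freedom a
   paradoxical SUBREG gives us.  */
#include <stdint.h>

static uint8_t
and_via_wide_sketch (uint8_t a, uint8_t b)
{
  /* "Widen" without zero-extending: the upper 24 bits are arbitrary.  */
  uint32_t wide_a = 0xdeadbe00u | a;
  uint32_t wide_b = 0xcafef000u | b;

  /* Perform the operation in the wide mode...  */
  uint32_t wide_result = wide_a & wide_b;

  /* ...and truncate back.  Bit i of the result depends only on bit i of
     the inputs, so this equals (a & b).  A right shift would not have
     that property, which is why shifts still need a real extension.  */
  return (uint8_t) wide_result;
}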
239 /* Return the optab used for computing the operation given by
240 the tree code, CODE. This function is not always usable (for
241 example, it cannot give complete results for multiplication
242 or division) but probably ought to be relied on more widely
243 throughout the expander. */
244 optab
245 optab_for_tree_code (enum tree_code code, tree type)
247 bool trapv;
248 switch (code)
250 case BIT_AND_EXPR:
251 return and_optab;
253 case BIT_IOR_EXPR:
254 return ior_optab;
256 case BIT_NOT_EXPR:
257 return one_cmpl_optab;
259 case BIT_XOR_EXPR:
260 return xor_optab;
262 case TRUNC_MOD_EXPR:
263 case CEIL_MOD_EXPR:
264 case FLOOR_MOD_EXPR:
265 case ROUND_MOD_EXPR:
266 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
268 case RDIV_EXPR:
269 case TRUNC_DIV_EXPR:
270 case CEIL_DIV_EXPR:
271 case FLOOR_DIV_EXPR:
272 case ROUND_DIV_EXPR:
273 case EXACT_DIV_EXPR:
274 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
276 case LSHIFT_EXPR:
277 return ashl_optab;
279 case RSHIFT_EXPR:
280 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
282 case LROTATE_EXPR:
283 return rotl_optab;
285 case RROTATE_EXPR:
286 return rotr_optab;
288 case MAX_EXPR:
289 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
291 case MIN_EXPR:
292 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
294 case REALIGN_LOAD_EXPR:
295 return vec_realign_load_optab;
297 case REDUC_MAX_EXPR:
298 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
300 case REDUC_MIN_EXPR:
301 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
303 case REDUC_PLUS_EXPR:
304 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
306 case VEC_LSHIFT_EXPR:
307 return vec_shl_optab;
309 case VEC_RSHIFT_EXPR:
310 return vec_shr_optab;
312 default:
313 break;
316 trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
317 switch (code)
319 case PLUS_EXPR:
320 return trapv ? addv_optab : add_optab;
322 case MINUS_EXPR:
323 return trapv ? subv_optab : sub_optab;
325 case MULT_EXPR:
326 return trapv ? smulv_optab : smul_optab;
328 case NEGATE_EXPR:
329 return trapv ? negv_optab : neg_optab;
331 case ABS_EXPR:
332 return trapv ? absv_optab : abs_optab;
334 default:
335 return NULL;
340 /* Generate code to perform an operation specified by TERNARY_OPTAB
341 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
343 UNSIGNEDP is for the case where we have to widen the operands
344 to perform the operation. It says to use zero-extension.
346 If TARGET is nonzero, the value
347 is generated there, if it is convenient to do so.
348 In all cases an rtx is returned for the locus of the value;
349 this may or may not be TARGET. */
352 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
353 rtx op1, rtx op2, rtx target, int unsignedp)
355 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
356 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
357 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
358 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
359 rtx temp;
360 rtx pat;
361 rtx xop0 = op0, xop1 = op1, xop2 = op2;
363 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
364 != CODE_FOR_nothing);
366 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
367 temp = gen_reg_rtx (mode);
368 else
369 temp = target;
371 /* In case the insn wants input operands in modes different from
372 those of the actual operands, convert the operands. It would
373 seem that we don't need to convert CONST_INTs, but we do, so
374 that they're properly zero-extended, sign-extended or truncated
375 for their mode. */
377 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
378 xop0 = convert_modes (mode0,
379 GET_MODE (op0) != VOIDmode
380 ? GET_MODE (op0)
381 : mode,
382 xop0, unsignedp);
384 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
385 xop1 = convert_modes (mode1,
386 GET_MODE (op1) != VOIDmode
387 ? GET_MODE (op1)
388 : mode,
389 xop1, unsignedp);
391 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
392 xop2 = convert_modes (mode2,
393 GET_MODE (op2) != VOIDmode
394 ? GET_MODE (op2)
395 : mode,
396 xop2, unsignedp);
398 /* Now, if insn's predicates don't allow our operands, put them into
399 pseudo regs. */
401 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
402 && mode0 != VOIDmode)
403 xop0 = copy_to_mode_reg (mode0, xop0);
405 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
406 && mode1 != VOIDmode)
407 xop1 = copy_to_mode_reg (mode1, xop1);
409 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
410 && mode2 != VOIDmode)
411 xop2 = copy_to_mode_reg (mode2, xop2);
413 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
415 emit_insn (pat);
416 return temp;
/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

static rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    return simplify_gen_binary (binoptab->code, mode, op0, op1);
  else
    return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}

/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
452 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
455 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
457 enum insn_code icode;
458 rtx rtx_op1, rtx_op2;
459 enum machine_mode mode1;
460 enum machine_mode mode2;
461 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
462 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
463 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
464 optab shift_optab;
465 rtx pat;
467 switch (TREE_CODE (vec_shift_expr))
469 case VEC_RSHIFT_EXPR:
470 shift_optab = vec_shr_optab;
471 break;
472 case VEC_LSHIFT_EXPR:
473 shift_optab = vec_shl_optab;
474 break;
475 default:
476 gcc_unreachable ();
479 icode = (int) shift_optab->handlers[(int) mode].insn_code;
480 gcc_assert (icode != CODE_FOR_nothing);
482 mode1 = insn_data[icode].operand[1].mode;
483 mode2 = insn_data[icode].operand[2].mode;
485 rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
486 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
487 && mode1 != VOIDmode)
488 rtx_op1 = force_reg (mode1, rtx_op1);
490 rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
491 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
492 && mode2 != VOIDmode)
493 rtx_op2 = force_reg (mode2, rtx_op2);
495 if (!target
496 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
497 target = gen_reg_rtx (mode);
499 /* Emit instruction */
500 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
501 gcc_assert (pat);
502 emit_insn (pat);
504 return target;
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}
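
/* Illustrative sketch (not part of the original file): the ">= BITS_PER_WORD"
   case above, written out for a logical right shift of a 64-bit value held
   in two 32-bit words.  The fixed 32-bit word size and the name are
   assumptions made only for this example.  */
#include <stdint.h>

static void
superword_lshr_sketch (uint32_t outof_input, unsigned int count,
                       uint32_t *outof_target, uint32_t *into_target)
{
  /* COUNT is the full shift count, known to be in [32, 64).  The low
     result word is the high input word shifted by COUNT - 32; the high
     result word fills with zeros (it would be copies of the sign bit
     for an arithmetic right shift).  */
  *into_target = outof_input >> (count - 32);
  *outof_target = 0;
}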
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_double_const (-1, -1, op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}
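
/* Illustrative sketch (not part of the original file): the "< BITS_PER_WORD"
   case above for a logical right shift of a 64-bit value held in two 32-bit
   words.  The 32-bit word size and the helper name are assumptions for the
   example; a constant COUNT in [1, 31] is assumed, matching the "constant,
   nonzero" precondition of the real code.  */
#include <stdint.h>

static void
subword_lshr_sketch (uint32_t outof_input, uint32_t into_input,
                     unsigned int count,
                     uint32_t *outof_target, uint32_t *into_target)
{
  /* Bits that cross from the high word into the low word.  Shifting by 1
     first and then by (31 - COUNT) computes OUTOF_INPUT << (32 - COUNT)
     without ever using a shift count of 32, whose behavior is undefined
     on many targets (and in C).  */
  uint32_t carries = (outof_input << 1) << (31 - count);

  /* The low result word combines the shifted low word with the carried
     bits; the high result word is an ordinary single-word shift.  */
  *into_target = (into_input >> count) | carries;
  *outof_target = outof_input >> count;
}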
616 #ifdef HAVE_conditional_move
617 /* Try implementing expand_doubleword_shift using conditional moves.
618 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
619 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
620 are the shift counts to use in the former and latter case. All other
621 arguments are the same as the parent routine. */
623 static bool
624 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
625 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
626 rtx outof_input, rtx into_input,
627 rtx subword_op1, rtx superword_op1,
628 rtx outof_target, rtx into_target,
629 int unsignedp, enum optab_methods methods,
630 unsigned HOST_WIDE_INT shift_mask)
632 rtx outof_superword, into_superword;
634 /* Put the superword version of the output into OUTOF_SUPERWORD and
635 INTO_SUPERWORD. */
636 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
637 if (outof_target != 0 && subword_op1 == superword_op1)
639 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
640 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
641 into_superword = outof_target;
642 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
643 outof_superword, 0, unsignedp, methods))
644 return false;
646 else
648 into_superword = gen_reg_rtx (word_mode);
649 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
650 outof_superword, into_superword,
651 unsignedp, methods))
652 return false;
655 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
656 if (!expand_subword_shift (op1_mode, binoptab,
657 outof_input, into_input, subword_op1,
658 outof_target, into_target,
659 unsignedp, methods, shift_mask))
660 return false;
662 /* Select between them. Do the INTO half first because INTO_SUPERWORD
663 might be the current value of OUTOF_TARGET. */
664 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
665 into_target, into_superword, word_mode, false))
666 return false;
668 if (outof_target != 0)
669 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
670 outof_target, outof_superword,
671 word_mode, false))
672 return false;
674 return true;
676 #endif
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
707 static bool
708 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
709 rtx outof_input, rtx into_input, rtx op1,
710 rtx outof_target, rtx into_target,
711 int unsignedp, enum optab_methods methods,
712 unsigned HOST_WIDE_INT shift_mask)
714 rtx superword_op1, tmp, cmp1, cmp2;
715 rtx subword_label, done_label;
716 enum rtx_code cmp_code;
718 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
719 fill the result with sign or zero bits as appropriate. If so, the value
720 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
721 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
722 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
724 This isn't worthwhile for constant shifts since the optimizers will
725 cope better with in-range shift counts. */
726 if (shift_mask >= BITS_PER_WORD
727 && outof_target != 0
728 && !CONSTANT_P (op1))
730 if (!expand_doubleword_shift (op1_mode, binoptab,
731 outof_input, into_input, op1,
732 0, into_target,
733 unsignedp, methods, shift_mask))
734 return false;
735 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
736 outof_target, unsignedp, methods))
737 return false;
738 return true;
741 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
742 is true when the effective shift value is less than BITS_PER_WORD.
743 Set SUPERWORD_OP1 to the shift count that should be used to shift
744 OUTOF_INPUT into INTO_TARGET when the condition is false. */
745 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
746 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
748 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
749 is a subword shift count. */
750 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
751 0, true, methods);
752 cmp2 = CONST0_RTX (op1_mode);
753 cmp_code = EQ;
754 superword_op1 = op1;
756 else
758 /* Set CMP1 to OP1 - BITS_PER_WORD. */
759 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
760 0, true, methods);
761 cmp2 = CONST0_RTX (op1_mode);
762 cmp_code = LT;
763 superword_op1 = cmp1;
765 if (cmp1 == 0)
766 return false;
768 /* If we can compute the condition at compile time, pick the
769 appropriate subroutine. */
770 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
771 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
773 if (tmp == const0_rtx)
774 return expand_superword_shift (binoptab, outof_input, superword_op1,
775 outof_target, into_target,
776 unsignedp, methods);
777 else
778 return expand_subword_shift (op1_mode, binoptab,
779 outof_input, into_input, op1,
780 outof_target, into_target,
781 unsignedp, methods, shift_mask);
784 #ifdef HAVE_conditional_move
785 /* Try using conditional moves to generate straight-line code. */
787 rtx start = get_last_insn ();
788 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
789 cmp_code, cmp1, cmp2,
790 outof_input, into_input,
791 op1, superword_op1,
792 outof_target, into_target,
793 unsignedp, methods, shift_mask))
794 return true;
795 delete_insns_since (start);
797 #endif
799 /* As a last resort, use branches to select the correct alternative. */
800 subword_label = gen_label_rtx ();
801 done_label = gen_label_rtx ();
803 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
804 0, 0, subword_label);
806 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
807 outof_target, into_target,
808 unsignedp, methods))
809 return false;
811 emit_jump_insn (gen_jump (done_label));
812 emit_barrier ();
813 emit_label (subword_label);
815 if (!expand_subword_shift (op1_mode, binoptab,
816 outof_input, into_input, op1,
817 outof_target, into_target,
818 unsignedp, methods, shift_mask))
819 return false;
821 emit_label (done_label);
822 return true;
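
/* Illustrative sketch (not part of the original file): the overall shape of
   the expansion above, as plain C for a logical right shift of a 64-bit
   value held in two 32-bit words with a variable count in [0, 63].  The
   word size and names are assumptions for the example; the real code emits
   RTL and selects between the two paths with conditional moves or a branch
   when the comparison cannot be resolved at compile time.  */
#include <stdint.h>

static void
doubleword_lshr_sketch (uint32_t outof_input, uint32_t into_input,
                        unsigned int count,
                        uint32_t *outof_target, uint32_t *into_target)
{
  if (count < 32)
    {
      /* Subword case.  The "shift by 1, then by 31 - COUNT" trick used by
         expand_subword_shift sidesteps the undefined shift by 32 that a
         direct (32 - COUNT) shift would hit when COUNT is 0.  */
      uint32_t carries = (outof_input << 1) << (31 - count);
      *into_target = (into_input >> count) | carries;
      *outof_target = outof_input >> count;
    }
  else
    {
      /* Superword case: the low result word comes entirely from the high
         input word, and the high result word fills with zeros (sign-bit
         copies for an arithmetic right shift).  */
      *into_target = outof_input >> (count - 32);
      *outof_target = 0;
    }
}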
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.  Note that we do not make a REG_NO_CONFLICT block here
   because we are not operating on one word at a time.

   The multiplication proceeds as follows:

                               _______________________
                               [__op0_high_|__op0_low__]
                               _______________________
                             * [__op1_high_|__op1_low__]
        _________________________________________________
                               _______________________
      (1)                      [__op0_low__*__op1_low__]
                  _______________________
      (2a)        [__op0_low__*__op1_high_]
                  _______________________
      (2b)        [__op0_high_*__op1_low__]
     _______________________
  (3)[__op0_high_*__op1_high_]

   This gives a 4-word result.  Since we are only interested in the
   lower 2 words, partial result (3) and the upper words of (2a) and
   (2b) don't need to be calculated.  Hence (2a) and (2b) can be
   calculated using non-widening multiplication.

   (1), however, needs to be calculated with an unsigned widening
   multiplication.  If this operation is not directly supported we
   try using a signed widening multiplication and adjust the result.
   This adjustment works as follows:

     If both operands are positive then no adjustment is needed.

     If the operands have different signs, for example op0_low < 0 and
     op1_low >= 0, the instruction treats the most significant bit of
     op0_low as a sign bit instead of a bit with significance
     2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
     with 2**BITS_PER_WORD - op0_low, and two's complements the
     result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
     the result.

     Similarly, if both operands are negative, we need to add
     (op0_low + op1_low) * 2**BITS_PER_WORD.

     We use a trick to adjust quickly.  We logically shift op0_low right
     (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
     op0_high (op1_high) before it is used to calculate 2b (2a).  If no
     logical shift exists, we do an arithmetic right shift and subtract
     the 0 or -1.  */
881 static rtx
882 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
883 bool umulp, enum optab_methods methods)
885 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
886 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
887 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
888 rtx product, adjust, product_high, temp;
890 rtx op0_high = operand_subword_force (op0, high, mode);
891 rtx op0_low = operand_subword_force (op0, low, mode);
892 rtx op1_high = operand_subword_force (op1, high, mode);
893 rtx op1_low = operand_subword_force (op1, low, mode);
895 /* If we're using an unsigned multiply to directly compute the product
896 of the low-order words of the operands and perform any required
897 adjustments of the operands, we begin by trying two more multiplications
898 and then computing the appropriate sum.
900 We have checked above that the required addition is provided.
901 Full-word addition will normally always succeed, especially if
902 it is provided at all, so we don't worry about its failure. The
903 multiplication may well fail, however, so we do handle that. */
905 if (!umulp)
907 /* ??? This could be done with emit_store_flag where available. */
908 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
909 NULL_RTX, 1, methods);
910 if (temp)
911 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
912 NULL_RTX, 0, OPTAB_DIRECT);
913 else
915 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
916 NULL_RTX, 0, methods);
917 if (!temp)
918 return NULL_RTX;
919 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
920 NULL_RTX, 0, OPTAB_DIRECT);
923 if (!op0_high)
924 return NULL_RTX;
927 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
928 NULL_RTX, 0, OPTAB_DIRECT);
929 if (!adjust)
930 return NULL_RTX;
932 /* OP0_HIGH should now be dead. */
934 if (!umulp)
936 /* ??? This could be done with emit_store_flag where available. */
937 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
938 NULL_RTX, 1, methods);
939 if (temp)
940 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
941 NULL_RTX, 0, OPTAB_DIRECT);
942 else
944 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
945 NULL_RTX, 0, methods);
946 if (!temp)
947 return NULL_RTX;
948 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
949 NULL_RTX, 0, OPTAB_DIRECT);
952 if (!op1_high)
953 return NULL_RTX;
956 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
957 NULL_RTX, 0, OPTAB_DIRECT);
958 if (!temp)
959 return NULL_RTX;
961 /* OP1_HIGH should now be dead. */
963 adjust = expand_binop (word_mode, add_optab, adjust, temp,
964 adjust, 0, OPTAB_DIRECT);
966 if (target && !REG_P (target))
967 target = NULL_RTX;
969 if (umulp)
970 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
971 target, 1, OPTAB_DIRECT);
972 else
973 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
974 target, 1, OPTAB_DIRECT);
976 if (!product)
977 return NULL_RTX;
979 product_high = operand_subword (product, high, 1, mode);
980 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
981 REG_P (product_high) ? product_high : adjust,
982 0, OPTAB_DIRECT);
983 emit_move_insn (product_high, adjust);
984 return product;
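
/* Illustrative sketch (not part of the original file): the three-multiply
   scheme described above, computing only the low two words of the product
   of two 64-bit operands held as 32-bit word pairs.  The unsigned widening
   multiply for partial product (1) is modeled with uint64_t; the 32-bit
   word size and the names are assumptions for the example, and the
   signed-widening adjustment described in the comment is omitted.  */
#include <stdint.h>

static void
doubleword_mult_sketch (uint32_t op0_high, uint32_t op0_low,
                        uint32_t op1_high, uint32_t op1_low,
                        uint32_t *prod_high, uint32_t *prod_low)
{
  /* (1) Unsigned widening multiply of the low words; both halves of this
     partial product are needed.  */
  uint64_t product = (uint64_t) op0_low * (uint64_t) op1_low;

  /* (2a) and (2b) only contribute to the high result word, so plain
     non-widening (truncating) multiplies are enough.  Partial product (3),
     op0_high * op1_high, affects only the discarded upper words.  */
  uint32_t adjust = op0_high * op1_low + op0_low * op1_high;

  *prod_low = (uint32_t) product;
  *prod_high = (uint32_t) (product >> 32) + adjust;
}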
/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}
1001 /* Generate code to perform an operation specified by BINOPTAB
1002 on operands OP0 and OP1, with result having machine-mode MODE.
1004 UNSIGNEDP is for the case where we have to widen the operands
1005 to perform the operation. It says to use zero-extension.
1007 If TARGET is nonzero, the value
1008 is generated there, if it is convenient to do so.
1009 In all cases an rtx is returned for the locus of the value;
1010 this may or may not be TARGET. */
1013 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1014 rtx target, int unsignedp, enum optab_methods methods)
1016 enum optab_methods next_methods
1017 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1018 ? OPTAB_WIDEN : methods);
1019 enum mode_class class;
1020 enum machine_mode wider_mode;
1021 rtx temp;
1022 int commutative_op = 0;
1023 int shift_op = (binoptab->code == ASHIFT
1024 || binoptab->code == ASHIFTRT
1025 || binoptab->code == LSHIFTRT
1026 || binoptab->code == ROTATE
1027 || binoptab->code == ROTATERT);
1028 rtx entry_last = get_last_insn ();
1029 rtx last;
1031 class = GET_MODE_CLASS (mode);
1033 if (flag_force_mem)
1035 /* Load duplicate non-volatile operands once. */
1036 if (rtx_equal_p (op0, op1) && ! volatile_refs_p (op0))
1038 op0 = force_not_mem (op0);
1039 op1 = op0;
1041 else
1043 op0 = force_not_mem (op0);
1044 op1 = force_not_mem (op1);
1048 /* If subtracting an integer constant, convert this into an addition of
1049 the negated constant. */
1051 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1053 op1 = negate_rtx (mode, op1);
1054 binoptab = add_optab;
1057 /* If we are inside an appropriately-short loop and we are optimizing,
1058 force expensive constants into a register. */
1059 if (CONSTANT_P (op0) && optimize
1060 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1062 if (GET_MODE (op0) != VOIDmode)
1063 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1064 op0 = force_reg (mode, op0);
1067 if (CONSTANT_P (op1) && optimize
1068 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1070 if (GET_MODE (op1) != VOIDmode)
1071 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1072 op1 = force_reg (mode, op1);
1075 /* Record where to delete back to if we backtrack. */
1076 last = get_last_insn ();
1078 /* If operation is commutative,
1079 try to make the first operand a register.
1080 Even better, try to make it the same as the target.
1081 Also try to make the last operand a constant. */
1082 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1083 || binoptab == smul_widen_optab
1084 || binoptab == umul_widen_optab
1085 || binoptab == smul_highpart_optab
1086 || binoptab == umul_highpart_optab)
1088 commutative_op = 1;
1090 if (((target == 0 || REG_P (target))
1091 ? ((REG_P (op1)
1092 && !REG_P (op0))
1093 || target == op1)
1094 : rtx_equal_p (op1, target))
1095 || GET_CODE (op0) == CONST_INT)
1097 temp = op1;
1098 op1 = op0;
1099 op0 = temp;
1103 /* If we can do it with a three-operand insn, do so. */
1105 if (methods != OPTAB_MUST_WIDEN
1106 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1108 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1109 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1110 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1111 rtx pat;
1112 rtx xop0 = op0, xop1 = op1;
1114 if (target)
1115 temp = target;
1116 else
1117 temp = gen_reg_rtx (mode);
1119 /* If it is a commutative operator and the modes would match
1120 if we would swap the operands, we can save the conversions. */
1121 if (commutative_op)
1123 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1124 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1126 rtx tmp;
1128 tmp = op0; op0 = op1; op1 = tmp;
1129 tmp = xop0; xop0 = xop1; xop1 = tmp;
1133 /* In case the insn wants input operands in modes different from
1134 those of the actual operands, convert the operands. It would
1135 seem that we don't need to convert CONST_INTs, but we do, so
1136 that they're properly zero-extended, sign-extended or truncated
1137 for their mode. */
1139 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1140 xop0 = convert_modes (mode0,
1141 GET_MODE (op0) != VOIDmode
1142 ? GET_MODE (op0)
1143 : mode,
1144 xop0, unsignedp);
1146 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1147 xop1 = convert_modes (mode1,
1148 GET_MODE (op1) != VOIDmode
1149 ? GET_MODE (op1)
1150 : mode,
1151 xop1, unsignedp);
1153 /* Now, if insn's predicates don't allow our operands, put them into
1154 pseudo regs. */
1156 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1157 && mode0 != VOIDmode)
1158 xop0 = copy_to_mode_reg (mode0, xop0);
1160 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1161 && mode1 != VOIDmode)
1162 xop1 = copy_to_mode_reg (mode1, xop1);
1164 if (!insn_data[icode].operand[0].predicate (temp, mode))
1165 temp = gen_reg_rtx (mode);
1167 pat = GEN_FCN (icode) (temp, xop0, xop1);
1168 if (pat)
1170 /* If PAT is composed of more than one insn, try to add an appropriate
1171 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1172 operand, call ourselves again, this time without a target. */
1173 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1174 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1176 delete_insns_since (last);
1177 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1178 unsignedp, methods);
1181 emit_insn (pat);
1182 return temp;
1184 else
1185 delete_insns_since (last);
1188 /* If this is a multiply, see if we can do a widening operation that
1189 takes operands of this mode and makes a wider mode. */
1191 if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
1192 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1193 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1194 != CODE_FOR_nothing))
1196 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1197 unsignedp ? umul_widen_optab : smul_widen_optab,
1198 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1200 if (temp != 0)
1202 if (GET_MODE_CLASS (mode) == MODE_INT)
1203 return gen_lowpart (mode, temp);
1204 else
1205 return convert_to_mode (mode, temp, unsignedp);
1209 /* Look for a wider mode of the same class for which we think we
1210 can open-code the operation. Check for a widening multiply at the
1211 wider mode as well. */
1213 if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1214 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1215 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1216 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1218 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1219 || (binoptab == smul_optab
1220 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1221 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1222 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1223 != CODE_FOR_nothing)))
1225 rtx xop0 = op0, xop1 = op1;
1226 int no_extend = 0;
1228 /* For certain integer operations, we need not actually extend
1229 the narrow operands, as long as we will truncate
1230 the results to the same narrowness. */
1232 if ((binoptab == ior_optab || binoptab == and_optab
1233 || binoptab == xor_optab
1234 || binoptab == add_optab || binoptab == sub_optab
1235 || binoptab == smul_optab || binoptab == ashl_optab)
1236 && class == MODE_INT)
1237 no_extend = 1;
1239 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1241 /* The second operand of a shift must always be extended. */
1242 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1243 no_extend && binoptab != ashl_optab);
1245 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1246 unsignedp, OPTAB_DIRECT);
1247 if (temp)
1249 if (class != MODE_INT)
1251 if (target == 0)
1252 target = gen_reg_rtx (mode);
1253 convert_move (target, temp, 0);
1254 return target;
1256 else
1257 return gen_lowpart (mode, temp);
1259 else
1260 delete_insns_since (last);
1264 /* These can be done a word at a time. */
1265 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1266 && class == MODE_INT
1267 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1268 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1270 int i;
1271 rtx insns;
1272 rtx equiv_value;
1274 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1275 won't be accurate, so use a new target. */
1276 if (target == 0 || target == op0 || target == op1)
1277 target = gen_reg_rtx (mode);
1279 start_sequence ();
1281 /* Do the actual arithmetic. */
1282 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1284 rtx target_piece = operand_subword (target, i, 1, mode);
1285 rtx x = expand_binop (word_mode, binoptab,
1286 operand_subword_force (op0, i, mode),
1287 operand_subword_force (op1, i, mode),
1288 target_piece, unsignedp, next_methods);
1290 if (x == 0)
1291 break;
1293 if (target_piece != x)
1294 emit_move_insn (target_piece, x);
1297 insns = get_insns ();
1298 end_sequence ();
1300 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1302 if (binoptab->code != UNKNOWN)
1303 equiv_value
1304 = gen_rtx_fmt_ee (binoptab->code, mode,
1305 copy_rtx (op0), copy_rtx (op1));
1306 else
1307 equiv_value = 0;
1309 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1310 return target;
1314 /* Synthesize double word shifts from single word shifts. */
1315 if ((binoptab == lshr_optab || binoptab == ashl_optab
1316 || binoptab == ashr_optab)
1317 && class == MODE_INT
1318 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1319 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1320 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1321 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1322 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1324 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1325 enum machine_mode op1_mode;
1327 double_shift_mask = targetm.shift_truncation_mask (mode);
1328 shift_mask = targetm.shift_truncation_mask (word_mode);
1329 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1331 /* Apply the truncation to constant shifts. */
1332 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1333 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1335 if (op1 == CONST0_RTX (op1_mode))
1336 return op0;
1338 /* Make sure that this is a combination that expand_doubleword_shift
1339 can handle. See the comments there for details. */
1340 if (double_shift_mask == 0
1341 || (shift_mask == BITS_PER_WORD - 1
1342 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1344 rtx insns, equiv_value;
1345 rtx into_target, outof_target;
1346 rtx into_input, outof_input;
1347 int left_shift, outof_word;
1349 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1350 won't be accurate, so use a new target. */
1351 if (target == 0 || target == op0 || target == op1)
1352 target = gen_reg_rtx (mode);
1354 start_sequence ();
1356 /* OUTOF_* is the word we are shifting bits away from, and
1357 INTO_* is the word that we are shifting bits towards, thus
1358 they differ depending on the direction of the shift and
1359 WORDS_BIG_ENDIAN. */
1361 left_shift = binoptab == ashl_optab;
1362 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1364 outof_target = operand_subword (target, outof_word, 1, mode);
1365 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1367 outof_input = operand_subword_force (op0, outof_word, mode);
1368 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1370 if (expand_doubleword_shift (op1_mode, binoptab,
1371 outof_input, into_input, op1,
1372 outof_target, into_target,
1373 unsignedp, methods, shift_mask))
1375 insns = get_insns ();
1376 end_sequence ();
1378 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1379 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1380 return target;
1382 end_sequence ();
1386 /* Synthesize double word rotates from single word shifts. */
1387 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1388 && class == MODE_INT
1389 && GET_CODE (op1) == CONST_INT
1390 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1391 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1392 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1394 rtx insns, equiv_value;
1395 rtx into_target, outof_target;
1396 rtx into_input, outof_input;
1397 rtx inter;
1398 int shift_count, left_shift, outof_word;
1400 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1401 won't be accurate, so use a new target. Do this also if target is not
1402 a REG, first because having a register instead may open optimization
1403 opportunities, and second because if target and op0 happen to be MEMs
1404 designating the same location, we would risk clobbering it too early
1405 in the code sequence we generate below. */
1406 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1407 target = gen_reg_rtx (mode);
1409 start_sequence ();
1411 shift_count = INTVAL (op1);
1413 /* OUTOF_* is the word we are shifting bits away from, and
1414 INTO_* is the word that we are shifting bits towards, thus
1415 they differ depending on the direction of the shift and
1416 WORDS_BIG_ENDIAN. */
1418 left_shift = (binoptab == rotl_optab);
1419 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1421 outof_target = operand_subword (target, outof_word, 1, mode);
1422 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1424 outof_input = operand_subword_force (op0, outof_word, mode);
1425 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1427 if (shift_count == BITS_PER_WORD)
1429 /* This is just a word swap. */
1430 emit_move_insn (outof_target, into_input);
1431 emit_move_insn (into_target, outof_input);
1432 inter = const0_rtx;
1434 else
1436 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1437 rtx first_shift_count, second_shift_count;
1438 optab reverse_unsigned_shift, unsigned_shift;
1440 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1441 ? lshr_optab : ashl_optab);
1443 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1444 ? ashl_optab : lshr_optab);
1446 if (shift_count > BITS_PER_WORD)
1448 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1449 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1451 else
1453 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1454 second_shift_count = GEN_INT (shift_count);
1457 into_temp1 = expand_binop (word_mode, unsigned_shift,
1458 outof_input, first_shift_count,
1459 NULL_RTX, unsignedp, next_methods);
1460 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1461 into_input, second_shift_count,
1462 NULL_RTX, unsignedp, next_methods);
1464 if (into_temp1 != 0 && into_temp2 != 0)
1465 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1466 into_target, unsignedp, next_methods);
1467 else
1468 inter = 0;
1470 if (inter != 0 && inter != into_target)
1471 emit_move_insn (into_target, inter);
1473 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1474 into_input, first_shift_count,
1475 NULL_RTX, unsignedp, next_methods);
1476 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1477 outof_input, second_shift_count,
1478 NULL_RTX, unsignedp, next_methods);
1480 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1481 inter = expand_binop (word_mode, ior_optab,
1482 outof_temp1, outof_temp2,
1483 outof_target, unsignedp, next_methods);
1485 if (inter != 0 && inter != outof_target)
1486 emit_move_insn (outof_target, inter);
1489 insns = get_insns ();
1490 end_sequence ();
1492 if (inter != 0)
1494 if (binoptab->code != UNKNOWN)
1495 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1496 else
1497 equiv_value = 0;
1499 /* We can't make this a no conflict block if this is a word swap,
1500 because the word swap case fails if the input and output values
1501 are in the same register. */
1502 if (shift_count != BITS_PER_WORD)
1503 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1504 else
1505 emit_insn (insns);
1508 return target;
1512 /* These can be done a word at a time by propagating carries. */
1513 if ((binoptab == add_optab || binoptab == sub_optab)
1514 && class == MODE_INT
1515 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1516 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1518 unsigned int i;
1519 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1520 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1521 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1522 rtx xop0, xop1, xtarget;
1524 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1525 value is one of those, use it. Otherwise, use 1 since it is the
1526 one easiest to get. */
1527 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1528 int normalizep = STORE_FLAG_VALUE;
1529 #else
1530 int normalizep = 1;
1531 #endif
1533 /* Prepare the operands. */
1534 xop0 = force_reg (mode, op0);
1535 xop1 = force_reg (mode, op1);
1537 xtarget = gen_reg_rtx (mode);
1539 if (target == 0 || !REG_P (target))
1540 target = xtarget;
1542 /* Indicate for flow that the entire target reg is being set. */
1543 if (REG_P (target))
1544 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1546 /* Do the actual arithmetic. */
1547 for (i = 0; i < nwords; i++)
1549 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1550 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1551 rtx op0_piece = operand_subword_force (xop0, index, mode);
1552 rtx op1_piece = operand_subword_force (xop1, index, mode);
1553 rtx x;
1555 /* Main add/subtract of the input operands. */
1556 x = expand_binop (word_mode, binoptab,
1557 op0_piece, op1_piece,
1558 target_piece, unsignedp, next_methods);
1559 if (x == 0)
1560 break;
1562 if (i + 1 < nwords)
1564 /* Store carry from main add/subtract. */
1565 carry_out = gen_reg_rtx (word_mode);
1566 carry_out = emit_store_flag_force (carry_out,
1567 (binoptab == add_optab
1568 ? LT : GT),
1569 x, op0_piece,
1570 word_mode, 1, normalizep);
1573 if (i > 0)
1575 rtx newx;
1577 /* Add/subtract previous carry to main result. */
1578 newx = expand_binop (word_mode,
1579 normalizep == 1 ? binoptab : otheroptab,
1580 x, carry_in,
1581 NULL_RTX, 1, next_methods);
1583 if (i + 1 < nwords)
1585 /* Get out carry from adding/subtracting carry in. */
1586 rtx carry_tmp = gen_reg_rtx (word_mode);
1587 carry_tmp = emit_store_flag_force (carry_tmp,
1588 (binoptab == add_optab
1589 ? LT : GT),
1590 newx, x,
1591 word_mode, 1, normalizep);
1593 /* Logical-ior the two poss. carry together. */
1594 carry_out = expand_binop (word_mode, ior_optab,
1595 carry_out, carry_tmp,
1596 carry_out, 0, next_methods);
1597 if (carry_out == 0)
1598 break;
1600 emit_move_insn (target_piece, newx);
1602 else
1604 if (x != target_piece)
1605 emit_move_insn (target_piece, x);
1608 carry_in = carry_out;
1611 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1613 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1614 || ! rtx_equal_p (target, xtarget))
1616 rtx temp = emit_move_insn (target, xtarget);
1618 set_unique_reg_note (temp,
1619 REG_EQUAL,
1620 gen_rtx_fmt_ee (binoptab->code, mode,
1621 copy_rtx (xop0),
1622 copy_rtx (xop1)));
1624 else
1625 target = xtarget;
1627 return target;
1630 else
1631 delete_insns_since (last);
1634 /* Attempt to synthesize double word multiplies using a sequence of word
1635 mode multiplications. We first attempt to generate a sequence using a
1636 more efficient unsigned widening multiply, and if that fails we then
1637 try using a signed widening multiply. */
1639 if (binoptab == smul_optab
1640 && class == MODE_INT
1641 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1642 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1643 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1645 rtx product = NULL_RTX;
1647 if (umul_widen_optab->handlers[(int) mode].insn_code
1648 != CODE_FOR_nothing)
1650 product = expand_doubleword_mult (mode, op0, op1, target,
1651 true, methods);
1652 if (!product)
1653 delete_insns_since (last);
1656 if (product == NULL_RTX
1657 && smul_widen_optab->handlers[(int) mode].insn_code
1658 != CODE_FOR_nothing)
1660 product = expand_doubleword_mult (mode, op0, op1, target,
1661 false, methods);
1662 if (!product)
1663 delete_insns_since (last);
1666 if (product != NULL_RTX)
1668 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1670 temp = emit_move_insn (target ? target : product, product);
1671 set_unique_reg_note (temp,
1672 REG_EQUAL,
1673 gen_rtx_fmt_ee (MULT, mode,
1674 copy_rtx (op0),
1675 copy_rtx (op1)));
1677 return product;
1681 /* It can't be open-coded in this mode.
1682 Use a library call if one is available and caller says that's ok. */
1684 if (binoptab->handlers[(int) mode].libfunc
1685 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1687 rtx insns;
1688 rtx op1x = op1;
1689 enum machine_mode op1_mode = mode;
1690 rtx value;
1692 start_sequence ();
1694 if (shift_op)
1696 op1_mode = word_mode;
1697 /* Specify unsigned here,
1698 since negative shift counts are meaningless. */
1699 op1x = convert_to_mode (word_mode, op1, 1);
1702 if (GET_MODE (op0) != VOIDmode
1703 && GET_MODE (op0) != mode)
1704 op0 = convert_to_mode (mode, op0, unsignedp);
1706 /* Pass 1 for NO_QUEUE so we don't lose any increments
1707 if the libcall is cse'd or moved. */
1708 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1709 NULL_RTX, LCT_CONST, mode, 2,
1710 op0, mode, op1x, op1_mode);
1712 insns = get_insns ();
1713 end_sequence ();
1715 target = gen_reg_rtx (mode);
1716 emit_libcall_block (insns, target, value,
1717 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1719 return target;
1722 delete_insns_since (last);
1724 /* It can't be done in this mode. Can we do it in a wider mode? */
1726 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1727 || methods == OPTAB_MUST_WIDEN))
1729 /* Caller says, don't even try. */
1730 delete_insns_since (entry_last);
1731 return 0;
1734 /* Compute the value of METHODS to pass to recursive calls.
1735 Don't allow widening to be tried recursively. */
1737 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1739 /* Look for a wider mode of the same class for which it appears we can do
1740 the operation. */
1742 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1744 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1745 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1747 if ((binoptab->handlers[(int) wider_mode].insn_code
1748 != CODE_FOR_nothing)
1749 || (methods == OPTAB_LIB
1750 && binoptab->handlers[(int) wider_mode].libfunc))
1752 rtx xop0 = op0, xop1 = op1;
1753 int no_extend = 0;
1755 /* For certain integer operations, we need not actually extend
1756 the narrow operands, as long as we will truncate
1757 the results to the same narrowness. */
1759 if ((binoptab == ior_optab || binoptab == and_optab
1760 || binoptab == xor_optab
1761 || binoptab == add_optab || binoptab == sub_optab
1762 || binoptab == smul_optab || binoptab == ashl_optab)
1763 && class == MODE_INT)
1764 no_extend = 1;
1766 xop0 = widen_operand (xop0, wider_mode, mode,
1767 unsignedp, no_extend);
1769 /* The second operand of a shift must always be extended. */
1770 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1771 no_extend && binoptab != ashl_optab);
1773 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1774 unsignedp, methods);
1775 if (temp)
1777 if (class != MODE_INT)
1779 if (target == 0)
1780 target = gen_reg_rtx (mode);
1781 convert_move (target, temp, 0);
1782 return target;
1784 else
1785 return gen_lowpart (mode, temp);
1787 else
1788 delete_insns_since (last);
1793 delete_insns_since (entry_last);
1794 return 0;
1797 /* Expand a binary operator which has both signed and unsigned forms.
1798 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1799 signed operations.
1801 If we widen unsigned operands, we may use a signed wider operation instead
1802 of an unsigned wider operation, since the result would be the same. */
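/* Editor's sketch (assumption, for illustration only): once unsigned
   operands are zero-extended, doing the arithmetic in the wider mode as a
   signed operation and truncating yields the same low-order bits, e.g. for
   an 8-bit addition widened to 32 bits.  */
#if 0
static unsigned char
widened_add_sketch (unsigned char a, unsigned char b)
{
  /* Zero-extend, operate in the wider (signed) mode, then truncate.  */
  int wide = (int) a + (int) b;
  return (unsigned char) wide;	/* Same as (unsigned char) (a + b).  */
}
#endif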
1805 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
1806 rtx op0, rtx op1, rtx target, int unsignedp,
1807 enum optab_methods methods)
1809 rtx temp;
1810 optab direct_optab = unsignedp ? uoptab : soptab;
1811 struct optab wide_soptab;
1813 /* Do it without widening, if possible. */
1814 temp = expand_binop (mode, direct_optab, op0, op1, target,
1815 unsignedp, OPTAB_DIRECT);
1816 if (temp || methods == OPTAB_DIRECT)
1817 return temp;
1819 /* Try widening to a signed int. Make a fake signed optab that
1820 hides any signed insn for direct use. */
1821 wide_soptab = *soptab;
1822 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
1823 wide_soptab.handlers[(int) mode].libfunc = 0;
1825 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1826 unsignedp, OPTAB_WIDEN);
1828 /* For unsigned operands, try widening to an unsigned int. */
1829 if (temp == 0 && unsignedp)
1830 temp = expand_binop (mode, uoptab, op0, op1, target,
1831 unsignedp, OPTAB_WIDEN);
1832 if (temp || methods == OPTAB_WIDEN)
1833 return temp;
1835 /* Use the right width lib call if that exists. */
1836 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
1837 if (temp || methods == OPTAB_LIB)
1838 return temp;
1840 /* Must widen and use a library call; use either signed or unsigned. */
1841 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1842 unsignedp, methods);
1843 if (temp != 0)
1844 return temp;
1845 if (unsignedp)
1846 return expand_binop (mode, uoptab, op0, op1, target,
1847 unsignedp, methods);
1848 return 0;
1851 /* Generate code to perform an operation specified by UNOPPTAB
1852 on operand OP0, with two results to TARG0 and TARG1.
1853 We assume that the order of the operands for the instruction
1854 is TARG0, TARG1, OP0.
1856 Either TARG0 or TARG1 may be zero, but what that means is that
1857 the result is not actually wanted. We will generate it into
1858 a dummy pseudo-reg and discard it. They may not both be zero.
1860 Returns 1 if this operation can be performed; 0 if not. */
1863 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
1864 int unsignedp)
1866 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1867 enum mode_class class;
1868 enum machine_mode wider_mode;
1869 rtx entry_last = get_last_insn ();
1870 rtx last;
1872 class = GET_MODE_CLASS (mode);
1874 if (flag_force_mem)
1875 op0 = force_not_mem (op0);
1877 if (!targ0)
1878 targ0 = gen_reg_rtx (mode);
1879 if (!targ1)
1880 targ1 = gen_reg_rtx (mode);
1882 /* Record where to go back to if we fail. */
1883 last = get_last_insn ();
1885 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1887 int icode = (int) unoptab->handlers[(int) mode].insn_code;
1888 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
1889 rtx pat;
1890 rtx xop0 = op0;
1892 if (GET_MODE (xop0) != VOIDmode
1893 && GET_MODE (xop0) != mode0)
1894 xop0 = convert_to_mode (mode0, xop0, unsignedp);
1896 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1897 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
1898 xop0 = copy_to_mode_reg (mode0, xop0);
1900 /* We could handle this, but we should always be called with a pseudo
1901 for our targets and all insns should take them as outputs. */
1902 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
1903 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
1905 pat = GEN_FCN (icode) (targ0, targ1, xop0);
1906 if (pat)
1908 emit_insn (pat);
1909 return 1;
1911 else
1912 delete_insns_since (last);
1915 /* It can't be done in this mode. Can we do it in a wider mode? */
1917 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1919 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1920 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1922 if (unoptab->handlers[(int) wider_mode].insn_code
1923 != CODE_FOR_nothing)
1925 rtx t0 = gen_reg_rtx (wider_mode);
1926 rtx t1 = gen_reg_rtx (wider_mode);
1927 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1929 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1931 convert_move (targ0, t0, unsignedp);
1932 convert_move (targ1, t1, unsignedp);
1933 return 1;
1935 else
1936 delete_insns_since (last);
1941 delete_insns_since (entry_last);
1942 return 0;
1945 /* Generate code to perform an operation specified by BINOPTAB
1946 on operands OP0 and OP1, with two results to TARG0 and TARG1.
1947 We assume that the order of the operands for the instruction
1948 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1949 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1951 Either TARG0 or TARG1 may be zero, but what that means is that
1952 the result is not actually wanted. We will generate it into
1953 a dummy pseudo-reg and discard it. They may not both be zero.
1955 Returns 1 if this operation can be performed; 0 if not. */
1958 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
1959 int unsignedp)
1961 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1962 enum mode_class class;
1963 enum machine_mode wider_mode;
1964 rtx entry_last = get_last_insn ();
1965 rtx last;
1967 class = GET_MODE_CLASS (mode);
1969 if (flag_force_mem)
1971 op0 = force_not_mem (op0);
1972 op1 = force_not_mem (op1);
1975 /* If we are inside an appropriately-short loop and we are optimizing,
1976 force expensive constants into a register. */
1977 if (CONSTANT_P (op0) && optimize
1978 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1979 op0 = force_reg (mode, op0);
1981 if (CONSTANT_P (op1) && optimize
1982 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1983 op1 = force_reg (mode, op1);
1985 if (!targ0)
1986 targ0 = gen_reg_rtx (mode);
1987 if (!targ1)
1988 targ1 = gen_reg_rtx (mode);
1990 /* Record where to go back to if we fail. */
1991 last = get_last_insn ();
1993 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1995 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1996 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1997 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1998 rtx pat;
1999 rtx xop0 = op0, xop1 = op1;
2001 /* In case the insn wants input operands in modes different from
2002 those of the actual operands, convert the operands. It would
2003 seem that we don't need to convert CONST_INTs, but we do, so
2004 that they're properly zero-extended, sign-extended or truncated
2005 for their mode. */
2007 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2008 xop0 = convert_modes (mode0,
2009 GET_MODE (op0) != VOIDmode
2010 ? GET_MODE (op0)
2011 : mode,
2012 xop0, unsignedp);
2014 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2015 xop1 = convert_modes (mode1,
2016 GET_MODE (op1) != VOIDmode
2017 ? GET_MODE (op1)
2018 : mode,
2019 xop1, unsignedp);
2021 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2022 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2023 xop0 = copy_to_mode_reg (mode0, xop0);
2025 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2026 xop1 = copy_to_mode_reg (mode1, xop1);
2028 /* We could handle this, but we should always be called with a pseudo
2029 for our targets and all insns should take them as outputs. */
2030 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2031 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2033 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2034 if (pat)
2036 emit_insn (pat);
2037 return 1;
2039 else
2040 delete_insns_since (last);
2043 /* It can't be done in this mode. Can we do it in a wider mode? */
2045 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2047 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2048 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2050 if (binoptab->handlers[(int) wider_mode].insn_code
2051 != CODE_FOR_nothing)
2053 rtx t0 = gen_reg_rtx (wider_mode);
2054 rtx t1 = gen_reg_rtx (wider_mode);
2055 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2056 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2058 if (expand_twoval_binop (binoptab, cop0, cop1,
2059 t0, t1, unsignedp))
2061 convert_move (targ0, t0, unsignedp);
2062 convert_move (targ1, t1, unsignedp);
2063 return 1;
2065 else
2066 delete_insns_since (last);
2071 delete_insns_since (entry_last);
2072 return 0;
2075 /* Expand the two-valued library call indicated by BINOPTAB, but
2076 preserve only one of the values. If TARG0 is non-NULL, the first
2077 value is placed into TARG0; otherwise the second value is placed
2078 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2079 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2080 This routine assumes that the value returned by the library call is
2081 as if the return value was of an integral mode twice as wide as the
2082 mode of OP0. Returns 1 if the call was successful. */
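/* Editor's sketch (assumption): a divmod-style libcall packs both results
   into a value twice as wide as MODE; the caller keeps only the half it
   asked for.  The example assumes a 32-bit MODE and the low half living at
   byte offset 0 (little-endian layout).  */
#if 0
static unsigned int
twoval_libcall_sketch (unsigned long long packed, int want_first)
{
  /* Offset 0 versus GET_MODE_SIZE (mode), mirroring the
     simplify_gen_subreg call below.  */
  return want_first ? (unsigned int) packed
		    : (unsigned int) (packed >> 32);
}
#endif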
2084 bool
2085 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2086 rtx targ0, rtx targ1, enum rtx_code code)
2088 enum machine_mode mode;
2089 enum machine_mode libval_mode;
2090 rtx libval;
2091 rtx insns;
2093 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2094 gcc_assert (!targ0 != !targ1);
2096 mode = GET_MODE (op0);
2097 if (!binoptab->handlers[(int) mode].libfunc)
2098 return false;
2100 /* The value returned by the library function will have twice as
2101 many bits as the nominal MODE. */
2102 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2103 MODE_INT);
2104 start_sequence ();
2105 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2106 NULL_RTX, LCT_CONST,
2107 libval_mode, 2,
2108 op0, mode,
2109 op1, mode);
2110 /* Get the part of VAL containing the value that we want. */
2111 libval = simplify_gen_subreg (mode, libval, libval_mode,
2112 targ0 ? 0 : GET_MODE_SIZE (mode));
2113 insns = get_insns ();
2114 end_sequence ();
2115 /* Move the result into the desired location. */
2116 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2117 gen_rtx_fmt_ee (code, mode, op0, op1));
2119 return true;
2123 /* Wrapper around expand_unop which takes an rtx code to specify
2124 the operation to perform, not an optab pointer. All other
2125 arguments are the same. */
2127 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2128 rtx target, int unsignedp)
2130 optab unop = code_to_optab[(int) code];
2131 gcc_assert (unop);
2133 return expand_unop (mode, unop, op0, target, unsignedp);
2136 /* Try calculating
2137 (clz:narrow x)
2138 as
2139 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
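/* Editor's sketch (assumption): the identity above for an 8-bit value
   widened to 32 bits; valid for nonzero X.  */
#if 0
static int
clz8_via_clz32_sketch (unsigned char x)
{
  /* clz of the zero-extended value, minus the 32 - 8 extra leading zero
     bits introduced by widening.  */
  return __builtin_clz ((unsigned int) x) - (32 - 8);
}
#endif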
2140 static rtx
2141 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2143 enum mode_class class = GET_MODE_CLASS (mode);
2144 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2146 enum machine_mode wider_mode;
2147 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2148 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2150 if (clz_optab->handlers[(int) wider_mode].insn_code
2151 != CODE_FOR_nothing)
2153 rtx xop0, temp, last;
2155 last = get_last_insn ();
2157 if (target == 0)
2158 target = gen_reg_rtx (mode);
2159 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2160 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2161 if (temp != 0)
2162 temp = expand_binop (wider_mode, sub_optab, temp,
2163 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2164 - GET_MODE_BITSIZE (mode)),
2165 target, true, OPTAB_DIRECT);
2166 if (temp == 0)
2167 delete_insns_since (last);
2169 return temp;
2173 return 0;
2176 /* Try calculating (parity x) as (and (popcount x) 1), where
2177 popcount can also be done in a wider mode. */
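/* Editor's sketch (assumption): the same identity in C for a 32-bit value,
   using the corresponding builtin.  */
#if 0
static int
parity_via_popcount_sketch (unsigned int x)
{
  /* Parity is the low bit of the population count.  */
  return __builtin_popcount (x) & 1;
}
#endif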
2178 static rtx
2179 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2181 enum mode_class class = GET_MODE_CLASS (mode);
2182 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2184 enum machine_mode wider_mode;
2185 for (wider_mode = mode; wider_mode != VOIDmode;
2186 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2188 if (popcount_optab->handlers[(int) wider_mode].insn_code
2189 != CODE_FOR_nothing)
2191 rtx xop0, temp, last;
2193 last = get_last_insn ();
2195 if (target == 0)
2196 target = gen_reg_rtx (mode);
2197 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2198 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2199 true);
2200 if (temp != 0)
2201 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2202 target, true, OPTAB_DIRECT);
2203 if (temp == 0)
2204 delete_insns_since (last);
2206 return temp;
2210 return 0;
2213 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2214 conditions, VAL may already be a SUBREG against which we cannot generate
2215 a further SUBREG. In this case, we expect forcing the value into a
2216 register will work around the situation. */
2218 static rtx
2219 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2220 enum machine_mode imode)
2222 rtx ret;
2223 ret = lowpart_subreg (omode, val, imode);
2224 if (ret == NULL)
2226 val = force_reg (imode, val);
2227 ret = lowpart_subreg (omode, val, imode);
2228 gcc_assert (ret != NULL);
2230 return ret;
2233 /* Expand a floating point absolute value or negation operation via a
2234 logical operation on the sign bit. */
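/* Editor's sketch (assumption): for IEEE single precision this amounts to
   clearing (ABS) or flipping (NEG) the sign bit of the value's bit image.  */
#if 0
static unsigned int
fabs_bits_sketch (unsigned int float_bits)
{
  return float_bits & 0x7fffffffu;	/* ABS: clear the sign bit.  */
}

static unsigned int
fneg_bits_sketch (unsigned int float_bits)
{
  return float_bits ^ 0x80000000u;	/* NEG: flip the sign bit.  */
}
#endif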
2236 static rtx
2237 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2238 rtx op0, rtx target)
2240 const struct real_format *fmt;
2241 int bitpos, word, nwords, i;
2242 enum machine_mode imode;
2243 HOST_WIDE_INT hi, lo;
2244 rtx temp, insns;
2246 /* The format has to have a simple sign bit. */
2247 fmt = REAL_MODE_FORMAT (mode);
2248 if (fmt == NULL)
2249 return NULL_RTX;
2251 bitpos = fmt->signbit_rw;
2252 if (bitpos < 0)
2253 return NULL_RTX;
2255 /* Don't create negative zeros if the format doesn't support them. */
2256 if (code == NEG && !fmt->has_signed_zero)
2257 return NULL_RTX;
2259 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2261 imode = int_mode_for_mode (mode);
2262 if (imode == BLKmode)
2263 return NULL_RTX;
2264 word = 0;
2265 nwords = 1;
2267 else
2269 imode = word_mode;
2271 if (FLOAT_WORDS_BIG_ENDIAN)
2272 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2273 else
2274 word = bitpos / BITS_PER_WORD;
2275 bitpos = bitpos % BITS_PER_WORD;
2276 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2279 if (bitpos < HOST_BITS_PER_WIDE_INT)
2281 hi = 0;
2282 lo = (HOST_WIDE_INT) 1 << bitpos;
2284 else
2286 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2287 lo = 0;
2289 if (code == ABS)
2290 lo = ~lo, hi = ~hi;
2292 if (target == 0 || target == op0)
2293 target = gen_reg_rtx (mode);
2295 if (nwords > 1)
2297 start_sequence ();
2299 for (i = 0; i < nwords; ++i)
2301 rtx targ_piece = operand_subword (target, i, 1, mode);
2302 rtx op0_piece = operand_subword_force (op0, i, mode);
2304 if (i == word)
2306 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2307 op0_piece,
2308 immed_double_const (lo, hi, imode),
2309 targ_piece, 1, OPTAB_LIB_WIDEN);
2310 if (temp != targ_piece)
2311 emit_move_insn (targ_piece, temp);
2313 else
2314 emit_move_insn (targ_piece, op0_piece);
2317 insns = get_insns ();
2318 end_sequence ();
2320 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2321 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2323 else
2325 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2326 gen_lowpart (imode, op0),
2327 immed_double_const (lo, hi, imode),
2328 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2329 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2331 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2332 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2335 return target;
2338 /* Generate code to perform an operation specified by UNOPTAB
2339 on operand OP0, with result having machine-mode MODE.
2341 UNSIGNEDP is for the case where we have to widen the operands
2342 to perform the operation. It says to use zero-extension.
2344 If TARGET is nonzero, the value
2345 is generated there, if it is convenient to do so.
2346 In all cases an rtx is returned for the locus of the value;
2347 this may or may not be TARGET. */
2350 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2351 int unsignedp)
2353 enum mode_class class;
2354 enum machine_mode wider_mode;
2355 rtx temp;
2356 rtx last = get_last_insn ();
2357 rtx pat;
2359 class = GET_MODE_CLASS (mode);
2361 if (flag_force_mem)
2362 op0 = force_not_mem (op0);
2364 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2366 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2367 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2368 rtx xop0 = op0;
2370 if (target)
2371 temp = target;
2372 else
2373 temp = gen_reg_rtx (mode);
2375 if (GET_MODE (xop0) != VOIDmode
2376 && GET_MODE (xop0) != mode0)
2377 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2379 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2381 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2382 xop0 = copy_to_mode_reg (mode0, xop0);
2384 if (!insn_data[icode].operand[0].predicate (temp, mode))
2385 temp = gen_reg_rtx (mode);
2387 pat = GEN_FCN (icode) (temp, xop0);
2388 if (pat)
2390 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2391 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2393 delete_insns_since (last);
2394 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2397 emit_insn (pat);
2399 return temp;
2401 else
2402 delete_insns_since (last);
2405 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2407 /* Widening clz needs special treatment. */
2408 if (unoptab == clz_optab)
2410 temp = widen_clz (mode, op0, target);
2411 if (temp)
2412 return temp;
2413 else
2414 goto try_libcall;
2417 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2418 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2419 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2421 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2423 rtx xop0 = op0;
2425 /* For certain operations, we need not actually extend
2426 the narrow operand, as long as we will truncate the
2427 results to the same narrowness. */
2429 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2430 (unoptab == neg_optab
2431 || unoptab == one_cmpl_optab)
2432 && class == MODE_INT);
2434 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2435 unsignedp);
2437 if (temp)
2439 if (class != MODE_INT)
2441 if (target == 0)
2442 target = gen_reg_rtx (mode);
2443 convert_move (target, temp, 0);
2444 return target;
2446 else
2447 return gen_lowpart (mode, temp);
2449 else
2450 delete_insns_since (last);
2454 /* These can be done a word at a time. */
2455 if (unoptab == one_cmpl_optab
2456 && class == MODE_INT
2457 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2458 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2460 int i;
2461 rtx insns;
2463 if (target == 0 || target == op0)
2464 target = gen_reg_rtx (mode);
2466 start_sequence ();
2468 /* Do the actual arithmetic. */
2469 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2471 rtx target_piece = operand_subword (target, i, 1, mode);
2472 rtx x = expand_unop (word_mode, unoptab,
2473 operand_subword_force (op0, i, mode),
2474 target_piece, unsignedp);
2476 if (target_piece != x)
2477 emit_move_insn (target_piece, x);
2480 insns = get_insns ();
2481 end_sequence ();
2483 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2484 gen_rtx_fmt_e (unoptab->code, mode,
2485 copy_rtx (op0)));
2486 return target;
2489 if (unoptab->code == NEG)
2491 /* Try negating floating point values by flipping the sign bit. */
2492 if (class == MODE_FLOAT)
2494 temp = expand_absneg_bit (NEG, mode, op0, target);
2495 if (temp)
2496 return temp;
2499 /* If there is no negation pattern, and we have no negative zero,
2500 try subtracting from zero. */
2501 if (!HONOR_SIGNED_ZEROS (mode))
2503 temp = expand_binop (mode, (unoptab == negv_optab
2504 ? subv_optab : sub_optab),
2505 CONST0_RTX (mode), op0, target,
2506 unsignedp, OPTAB_DIRECT);
2507 if (temp)
2508 return temp;
2512 /* Try calculating parity (x) as popcount (x) % 2. */
2513 if (unoptab == parity_optab)
2515 temp = expand_parity (mode, op0, target);
2516 if (temp)
2517 return temp;
2520 try_libcall:
2521 /* Now try a library call in this mode. */
2522 if (unoptab->handlers[(int) mode].libfunc)
2524 rtx insns;
2525 rtx value;
2526 enum machine_mode outmode = mode;
2528 /* All of these functions return small values. Thus we choose to
2529 have them return something that isn't a double-word. */
2530 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2531 || unoptab == popcount_optab || unoptab == parity_optab)
2532 outmode
2533 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2535 start_sequence ();
2537 /* Pass 1 for NO_QUEUE so we don't lose any increments
2538 if the libcall is cse'd or moved. */
2539 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2540 NULL_RTX, LCT_CONST, outmode,
2541 1, op0, mode);
2542 insns = get_insns ();
2543 end_sequence ();
2545 target = gen_reg_rtx (outmode);
2546 emit_libcall_block (insns, target, value,
2547 gen_rtx_fmt_e (unoptab->code, mode, op0));
2549 return target;
2552 /* It can't be done in this mode. Can we do it in a wider mode? */
2554 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2556 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2557 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2559 if ((unoptab->handlers[(int) wider_mode].insn_code
2560 != CODE_FOR_nothing)
2561 || unoptab->handlers[(int) wider_mode].libfunc)
2563 rtx xop0 = op0;
2565 /* For certain operations, we need not actually extend
2566 the narrow operand, as long as we will truncate the
2567 results to the same narrowness. */
2569 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2570 (unoptab == neg_optab
2571 || unoptab == one_cmpl_optab)
2572 && class == MODE_INT);
2574 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2575 unsignedp);
2577 /* If we are generating clz using wider mode, adjust the
2578 result. */
2579 if (unoptab == clz_optab && temp != 0)
2580 temp = expand_binop (wider_mode, sub_optab, temp,
2581 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2582 - GET_MODE_BITSIZE (mode)),
2583 target, true, OPTAB_DIRECT);
2585 if (temp)
2587 if (class != MODE_INT)
2589 if (target == 0)
2590 target = gen_reg_rtx (mode);
2591 convert_move (target, temp, 0);
2592 return target;
2594 else
2595 return gen_lowpart (mode, temp);
2597 else
2598 delete_insns_since (last);
2603 /* One final attempt at implementing negation via subtraction,
2604 this time allowing widening of the operand. */
2605 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2607 rtx temp;
2608 temp = expand_binop (mode,
2609 unoptab == negv_optab ? subv_optab : sub_optab,
2610 CONST0_RTX (mode), op0,
2611 target, unsignedp, OPTAB_LIB_WIDEN);
2612 if (temp)
2613 return temp;
2616 return 0;
2619 /* Emit code to compute the absolute value of OP0, with result to
2620 TARGET if convenient. (TARGET may be 0.) The return value says
2621 where the result actually is to be found.
2623 MODE is the mode of the operand; the mode of the result is
2624 different but can be deduced from MODE. */
2629 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2630 int result_unsignedp)
2632 rtx temp;
2634 if (! flag_trapv)
2635 result_unsignedp = 1;
2637 /* First try to do it with a special abs instruction. */
2638 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2639 op0, target, 0);
2640 if (temp != 0)
2641 return temp;
2643 /* For floating point modes, try clearing the sign bit. */
2644 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2646 temp = expand_absneg_bit (ABS, mode, op0, target);
2647 if (temp)
2648 return temp;
2651 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2652 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2653 && !HONOR_SIGNED_ZEROS (mode))
2655 rtx last = get_last_insn ();
2657 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2658 if (temp != 0)
2659 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2660 OPTAB_WIDEN);
2662 if (temp != 0)
2663 return temp;
2665 delete_insns_since (last);
2668 /* If this machine has expensive jumps, we can do integer absolute
2669 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2670 where W is the width of MODE. */
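/* Editor's sketch (assumption): the identity above for 32-bit int, where
   the arithmetic shift replicates the sign bit into a mask.  */
#if 0
static int
abs_branchfree_sketch (int x)
{
  int mask = x >> 31;		/* 0 if x >= 0, -1 if x < 0.  */
  return (x ^ mask) - mask;	/* abs (x), except for INT_MIN.  */
}
#endif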
2672 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2674 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2675 size_int (GET_MODE_BITSIZE (mode) - 1),
2676 NULL_RTX, 0);
2678 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2679 OPTAB_LIB_WIDEN);
2680 if (temp != 0)
2681 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2682 temp, extended, target, 0, OPTAB_LIB_WIDEN);
2684 if (temp != 0)
2685 return temp;
2688 return NULL_RTX;
2692 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2693 int result_unsignedp, int safe)
2695 rtx temp, op1;
2697 if (! flag_trapv)
2698 result_unsignedp = 1;
2700 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2701 if (temp != 0)
2702 return temp;
2704 /* If that does not win, use conditional jump and negate. */
2706 /* It is safe to use the target if it is the same
2707 as the source, provided it is also a pseudo register.  */
2708 if (op0 == target && REG_P (op0)
2709 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
2710 safe = 1;
2712 op1 = gen_label_rtx ();
2713 if (target == 0 || ! safe
2714 || GET_MODE (target) != mode
2715 || (MEM_P (target) && MEM_VOLATILE_P (target))
2716 || (REG_P (target)
2717 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2718 target = gen_reg_rtx (mode);
2720 emit_move_insn (target, op0);
2721 NO_DEFER_POP;
2723 /* If this mode is an integer too wide to compare properly,
2724 compare word by word. Rely on CSE to optimize constant cases. */
2725 if (GET_MODE_CLASS (mode) == MODE_INT
2726 && ! can_compare_p (GE, mode, ccp_jump))
2727 do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
2728 NULL_RTX, op1);
2729 else
2730 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2731 NULL_RTX, NULL_RTX, op1);
2733 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2734 target, target, 0);
2735 if (op0 != target)
2736 emit_move_insn (target, op0);
2737 emit_label (op1);
2738 OK_DEFER_POP;
2739 return target;
2742 /* A subroutine of expand_copysign, perform the copysign operation using the
2743 abs and neg primitives advertised to exist on the target. The assumption
2744 is that we have a split register file, and leaving op0 in fp registers,
2745 and not playing with subregs so much, will help the register allocator. */
2747 static rtx
2748 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2749 int bitpos, bool op0_is_abs)
2751 enum machine_mode imode;
2752 HOST_WIDE_INT hi, lo;
2753 int word;
2754 rtx label;
2756 if (target == op1)
2757 target = NULL_RTX;
2759 if (!op0_is_abs)
2761 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2762 if (op0 == NULL)
2763 return NULL_RTX;
2764 target = op0;
2766 else
2768 if (target == NULL_RTX)
2769 target = copy_to_reg (op0);
2770 else
2771 emit_move_insn (target, op0);
2774 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2776 imode = int_mode_for_mode (mode);
2777 if (imode == BLKmode)
2778 return NULL_RTX;
2779 op1 = gen_lowpart (imode, op1);
2781 else
2783 imode = word_mode;
2784 if (FLOAT_WORDS_BIG_ENDIAN)
2785 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2786 else
2787 word = bitpos / BITS_PER_WORD;
2788 bitpos = bitpos % BITS_PER_WORD;
2789 op1 = operand_subword_force (op1, word, mode);
2792 if (bitpos < HOST_BITS_PER_WIDE_INT)
2794 hi = 0;
2795 lo = (HOST_WIDE_INT) 1 << bitpos;
2797 else
2799 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2800 lo = 0;
2803 op1 = expand_binop (imode, and_optab, op1,
2804 immed_double_const (lo, hi, imode),
2805 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2807 label = gen_label_rtx ();
2808 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
2810 if (GET_CODE (op0) == CONST_DOUBLE)
2811 op0 = simplify_unary_operation (NEG, mode, op0, mode);
2812 else
2813 op0 = expand_unop (mode, neg_optab, op0, target, 0);
2814 if (op0 != target)
2815 emit_move_insn (target, op0);
2817 emit_label (label);
2819 return target;
2823 /* A subroutine of expand_copysign, perform the entire copysign operation
2824 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2825 is true if op0 is known to have its sign bit clear. */
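/* Editor's sketch (assumption): for IEEE single precision the bitmask form
   of copysign keeps the magnitude bits of OP0 and takes only the sign bit
   of OP1.  */
#if 0
static unsigned int
copysign_bits_sketch (unsigned int op0_bits, unsigned int op1_bits)
{
  return (op0_bits & 0x7fffffffu) | (op1_bits & 0x80000000u);
}
#endif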
2827 static rtx
2828 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2829 int bitpos, bool op0_is_abs)
2831 enum machine_mode imode;
2832 HOST_WIDE_INT hi, lo;
2833 int word, nwords, i;
2834 rtx temp, insns;
2836 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2838 imode = int_mode_for_mode (mode);
2839 if (imode == BLKmode)
2840 return NULL_RTX;
2841 word = 0;
2842 nwords = 1;
2844 else
2846 imode = word_mode;
2848 if (FLOAT_WORDS_BIG_ENDIAN)
2849 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2850 else
2851 word = bitpos / BITS_PER_WORD;
2852 bitpos = bitpos % BITS_PER_WORD;
2853 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2856 if (bitpos < HOST_BITS_PER_WIDE_INT)
2858 hi = 0;
2859 lo = (HOST_WIDE_INT) 1 << bitpos;
2861 else
2863 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2864 lo = 0;
2867 if (target == 0 || target == op0 || target == op1)
2868 target = gen_reg_rtx (mode);
2870 if (nwords > 1)
2872 start_sequence ();
2874 for (i = 0; i < nwords; ++i)
2876 rtx targ_piece = operand_subword (target, i, 1, mode);
2877 rtx op0_piece = operand_subword_force (op0, i, mode);
2879 if (i == word)
2881 if (!op0_is_abs)
2882 op0_piece = expand_binop (imode, and_optab, op0_piece,
2883 immed_double_const (~lo, ~hi, imode),
2884 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2886 op1 = expand_binop (imode, and_optab,
2887 operand_subword_force (op1, i, mode),
2888 immed_double_const (lo, hi, imode),
2889 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2891 temp = expand_binop (imode, ior_optab, op0_piece, op1,
2892 targ_piece, 1, OPTAB_LIB_WIDEN);
2893 if (temp != targ_piece)
2894 emit_move_insn (targ_piece, temp);
2896 else
2897 emit_move_insn (targ_piece, op0_piece);
2900 insns = get_insns ();
2901 end_sequence ();
2903 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
2905 else
2907 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
2908 immed_double_const (lo, hi, imode),
2909 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2911 op0 = gen_lowpart (imode, op0);
2912 if (!op0_is_abs)
2913 op0 = expand_binop (imode, and_optab, op0,
2914 immed_double_const (~lo, ~hi, imode),
2915 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2917 temp = expand_binop (imode, ior_optab, op0, op1,
2918 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2919 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2922 return target;
2925 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2926 scalar floating point mode. Return NULL if we do not know how to
2927 expand the operation inline. */
2930 expand_copysign (rtx op0, rtx op1, rtx target)
2932 enum machine_mode mode = GET_MODE (op0);
2933 const struct real_format *fmt;
2934 bool op0_is_abs;
2935 rtx temp;
2937 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
2938 gcc_assert (GET_MODE (op1) == mode);
2940 /* First try to do it with a special instruction. */
2941 temp = expand_binop (mode, copysign_optab, op0, op1,
2942 target, 0, OPTAB_DIRECT);
2943 if (temp)
2944 return temp;
2946 fmt = REAL_MODE_FORMAT (mode);
2947 if (fmt == NULL || !fmt->has_signed_zero)
2948 return NULL_RTX;
2950 op0_is_abs = false;
2951 if (GET_CODE (op0) == CONST_DOUBLE)
2953 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
2954 op0 = simplify_unary_operation (ABS, mode, op0, mode);
2955 op0_is_abs = true;
2958 if (fmt->signbit_ro >= 0
2959 && (GET_CODE (op0) == CONST_DOUBLE
2960 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
2961 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
2963 temp = expand_copysign_absneg (mode, op0, op1, target,
2964 fmt->signbit_ro, op0_is_abs);
2965 if (temp)
2966 return temp;
2969 if (fmt->signbit_rw < 0)
2970 return NULL_RTX;
2971 return expand_copysign_bit (mode, op0, op1, target,
2972 fmt->signbit_rw, op0_is_abs);
2975 /* Generate an instruction whose insn-code is INSN_CODE,
2976 with two operands: an output TARGET and an input OP0.
2977 TARGET *must* be nonzero, and the output is always stored there.
2978 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2979 the value that is stored into TARGET. */
2981 void
2982 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
2984 rtx temp;
2985 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2986 rtx pat;
2988 temp = target;
2990 /* Sign and zero extension from memory is often done specially on
2991 RISC machines, so forcing into a register here can pessimize
2992 code. */
2993 if (flag_force_mem && code != SIGN_EXTEND && code != ZERO_EXTEND)
2994 op0 = force_not_mem (op0);
2996 /* Now, if insn does not accept our operands, put them into pseudos. */
2998 if (!insn_data[icode].operand[1].predicate (op0, mode0))
2999 op0 = copy_to_mode_reg (mode0, op0);
3001 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp))
3002 || (flag_force_mem && MEM_P (temp)))
3003 temp = gen_reg_rtx (GET_MODE (temp));
3005 pat = GEN_FCN (icode) (temp, op0);
3007 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3008 add_equal_note (pat, temp, code, op0, NULL_RTX);
3010 emit_insn (pat);
3012 if (temp != target)
3013 emit_move_insn (target, temp);
3016 struct no_conflict_data
3018 rtx target, first, insn;
3019 bool must_stay;
3022 /* Called via note_stores by emit_no_conflict_block. Set P->must_stay
3023 if the currently examined clobber / store has to stay in the list of
3024 insns that constitute the actual no_conflict block. */
3025 static void
3026 no_conflict_move_test (rtx dest, rtx set, void *p0)
3028 struct no_conflict_data *p= p0;
3030 /* If this insn directly contributes to setting the target, it must stay. */
3031 if (reg_overlap_mentioned_p (p->target, dest))
3032 p->must_stay = true;
3033 /* If we haven't committed to keeping any other insns in the list yet,
3034 there is nothing more to check. */
3035 else if (p->insn == p->first)
3036 return;
3037 /* If this insn sets / clobbers a register that feeds one of the insns
3038 already in the list, this insn has to stay too. */
3039 else if (reg_mentioned_p (dest, PATTERN (p->first))
3040 || reg_used_between_p (dest, p->first, p->insn)
3041 /* Likewise if this insn depends on a register set by a previous
3042 insn in the list. */
3043 || (GET_CODE (set) == SET
3044 && (modified_in_p (SET_SRC (set), p->first)
3045 || modified_between_p (SET_SRC (set), p->first, p->insn))))
3046 p->must_stay = true;
3049 /* Emit code to perform a series of operations on a multi-word quantity, one
3050 word at a time.
3052 Such a block is preceded by a CLOBBER of the output, consists of multiple
3053 insns, each setting one word of the output, and followed by a SET copying
3054 the output to itself.
3056 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3057 note indicating that it doesn't conflict with the (also multi-word)
3058 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3059 notes.
3061 INSNS is a block of code generated to perform the operation, not including
3062 the CLOBBER and final copy. All insns that compute intermediate values
3063 are first emitted, followed by the block as described above.
3065 TARGET, OP0, and OP1 are the output and inputs of the operations,
3066 respectively. OP1 may be zero for a unary operation.
3068 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3069 on the last insn.
3071 If TARGET is not a register, INSNS is simply emitted with no special
3072 processing. Likewise if anything in INSNS is not an INSN or if
3073 there is a libcall block inside INSNS.
3075 The final insn emitted is returned. */
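/* Editor's illustration (assumption): for a two-word TARGET the emitted
   block has roughly this shape, bracketed by REG_LIBCALL/REG_RETVAL notes:

     (clobber TARGET)
     (set (word 0 of TARGET) ...)   ;; carries a REG_NO_CONFLICT note
     (set (word 1 of TARGET) ...)   ;; carries a REG_NO_CONFLICT note
     (set TARGET TARGET)            ;; final copy, REG_EQUAL = EQUIV  */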
3078 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3080 rtx prev, next, first, last, insn;
3082 if (!REG_P (target) || reload_in_progress)
3083 return emit_insn (insns);
3084 else
3085 for (insn = insns; insn; insn = NEXT_INSN (insn))
3086 if (!NONJUMP_INSN_P (insn)
3087 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3088 return emit_insn (insns);
3090 /* First emit all insns that do not store into words of the output and remove
3091 these from the list. */
3092 for (insn = insns; insn; insn = next)
3094 rtx note;
3095 struct no_conflict_data data;
3097 next = NEXT_INSN (insn);
3099 /* Some ports (cris) create libcall regions of their own.  We must
3100 avoid any potential nesting of LIBCALLs. */
3101 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3102 remove_note (insn, note);
3103 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3104 remove_note (insn, note);
3106 data.target = target;
3107 data.first = insns;
3108 data.insn = insn;
3109 data.must_stay = 0;
3110 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3111 if (! data.must_stay)
3113 if (PREV_INSN (insn))
3114 NEXT_INSN (PREV_INSN (insn)) = next;
3115 else
3116 insns = next;
3118 if (next)
3119 PREV_INSN (next) = PREV_INSN (insn);
3121 add_insn (insn);
3125 prev = get_last_insn ();
3127 /* Now write the CLOBBER of the output, followed by the setting of each
3128 of the words, followed by the final copy. */
3129 if (target != op0 && target != op1)
3130 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3132 for (insn = insns; insn; insn = next)
3134 next = NEXT_INSN (insn);
3135 add_insn (insn);
3137 if (op1 && REG_P (op1))
3138 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3139 REG_NOTES (insn));
3141 if (op0 && REG_P (op0))
3142 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3143 REG_NOTES (insn));
3146 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3147 != CODE_FOR_nothing)
3149 last = emit_move_insn (target, target);
3150 if (equiv)
3151 set_unique_reg_note (last, REG_EQUAL, equiv);
3153 else
3155 last = get_last_insn ();
3157 /* Remove any existing REG_EQUAL note from "last", or else it will
3158 be mistaken for a note referring to the full contents of the
3159 alleged libcall value when found together with the REG_RETVAL
3160 note added below. An existing note can come from an insn
3161 expansion at "last". */
3162 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3165 if (prev == 0)
3166 first = get_insns ();
3167 else
3168 first = NEXT_INSN (prev);
3170 /* Encapsulate the block so it gets manipulated as a unit. */
3171 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3172 REG_NOTES (first));
3173 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
3175 return last;
3178 /* Emit code to make a call to a constant function or a library call.
3180 INSNS is a list containing all insns emitted in the call.
3181 These insns leave the result in RESULT. Our job is to copy RESULT
3182 to TARGET, which is logically equivalent to EQUIV.
3184 We first emit any insns that set a pseudo on the assumption that these are
3185 loading constants into registers; doing so allows them to be safely cse'ed
3186 between blocks. Then we emit all the other insns in the block, followed by
3187 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3188 note with an operand of EQUIV.
3190 Moving assignments to pseudos outside of the block is done to improve
3191 the generated code, but is not required to generate correct code,
3192 hence being unable to move an assignment is not grounds for not making
3193 a libcall block. There are two reasons why it is safe to leave these
3194 insns inside the block: First, we know that these pseudos cannot be
3195 used in generated RTL outside the block since they are created for
3196 temporary purposes within the block. Second, CSE will not record the
3197 values of anything set inside a libcall block, so we know they must
3198 be dead at the end of the block.
3200 Except for the first group of insns (the ones setting pseudos), the
3201 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3203 void
3204 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3206 rtx final_dest = target;
3207 rtx prev, next, first, last, insn;
3209 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3210 into a MEM later. Protect the libcall block from this change. */
3211 if (! REG_P (target) || REG_USERVAR_P (target))
3212 target = gen_reg_rtx (GET_MODE (target));
3214 /* If we're using non-call exceptions, a libcall corresponding to an
3215 operation that may trap may also trap. */
3216 if (flag_non_call_exceptions && may_trap_p (equiv))
3218 for (insn = insns; insn; insn = NEXT_INSN (insn))
3219 if (CALL_P (insn))
3221 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3223 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3224 remove_note (insn, note);
3227 else
3228 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3229 reg note to indicate that this call cannot throw or execute a nonlocal
3230 goto (unless there is already a REG_EH_REGION note, in which case
3231 we update it). */
3232 for (insn = insns; insn; insn = NEXT_INSN (insn))
3233 if (CALL_P (insn))
3235 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3237 if (note != 0)
3238 XEXP (note, 0) = constm1_rtx;
3239 else
3240 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3241 REG_NOTES (insn));
3244 /* First emit all insns that set pseudos. Remove them from the list as
3245 we go. Avoid insns that set pseudos which were referenced in previous
3246 insns. These can be generated by move_by_pieces, for example,
3247 to update an address. Similarly, avoid insns that reference things
3248 set in previous insns. */
3250 for (insn = insns; insn; insn = next)
3252 rtx set = single_set (insn);
3253 rtx note;
3255 /* Some ports (cris) create libcall regions of their own.  We must
3256 avoid any potential nesting of LIBCALLs. */
3257 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3258 remove_note (insn, note);
3259 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3260 remove_note (insn, note);
3262 next = NEXT_INSN (insn);
3264 if (set != 0 && REG_P (SET_DEST (set))
3265 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
3266 && (insn == insns
3267 || ((! INSN_P(insns)
3268 || ! reg_mentioned_p (SET_DEST (set), PATTERN (insns)))
3269 && ! reg_used_between_p (SET_DEST (set), insns, insn)
3270 && ! modified_in_p (SET_SRC (set), insns)
3271 && ! modified_between_p (SET_SRC (set), insns, insn))))
3273 if (PREV_INSN (insn))
3274 NEXT_INSN (PREV_INSN (insn)) = next;
3275 else
3276 insns = next;
3278 if (next)
3279 PREV_INSN (next) = PREV_INSN (insn);
3281 add_insn (insn);
3284 /* Some ports use a loop to copy large arguments onto the stack.
3285 Don't move anything outside such a loop. */
3286 if (LABEL_P (insn))
3287 break;
3290 prev = get_last_insn ();
3292 /* Write the remaining insns followed by the final copy. */
3294 for (insn = insns; insn; insn = next)
3296 next = NEXT_INSN (insn);
3298 add_insn (insn);
3301 last = emit_move_insn (target, result);
3302 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3303 != CODE_FOR_nothing)
3304 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3305 else
3307 /* Remove any existing REG_EQUAL note from "last", or else it will
3308 be mistaken for a note referring to the full contents of the
3309 libcall value when found together with the REG_RETVAL note added
3310 below. An existing note can come from an insn expansion at
3311 "last". */
3312 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3315 if (final_dest != target)
3316 emit_move_insn (final_dest, target);
3318 if (prev == 0)
3319 first = get_insns ();
3320 else
3321 first = NEXT_INSN (prev);
3323 /* Encapsulate the block so it gets manipulated as a unit. */
3324 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3326 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3327 when the encapsulated region would not be in one basic block,
3328 i.e. when there is a control_flow_insn_p insn between FIRST and LAST. */
3330 bool attach_libcall_retval_notes = true;
3331 next = NEXT_INSN (last);
3332 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3333 if (control_flow_insn_p (insn))
3335 attach_libcall_retval_notes = false;
3336 break;
3339 if (attach_libcall_retval_notes)
3341 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3342 REG_NOTES (first));
3343 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3344 REG_NOTES (last));
3349 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3350 PURPOSE describes how this comparison will be used. CODE is the rtx
3351 comparison code we will be using.
3353 ??? Actually, CODE is slightly weaker than that. A target is still
3354 required to implement all of the normal bcc operations, but not
3355 required to implement all (or any) of the unordered bcc operations. */
3358 can_compare_p (enum rtx_code code, enum machine_mode mode,
3359 enum can_compare_purpose purpose)
3363 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3365 if (purpose == ccp_jump)
3366 return bcc_gen_fctn[(int) code] != NULL;
3367 else if (purpose == ccp_store_flag)
3368 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3369 else
3370 /* There's only one cmov entry point, and it's allowed to fail. */
3371 return 1;
3373 if (purpose == ccp_jump
3374 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3375 return 1;
3376 if (purpose == ccp_cmov
3377 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3378 return 1;
3379 if (purpose == ccp_store_flag
3380 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3381 return 1;
3382 mode = GET_MODE_WIDER_MODE (mode);
3384 while (mode != VOIDmode);
3386 return 0;
3389 /* This function is called when we are going to emit a compare instruction that
3390 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3392 *PMODE is the mode of the inputs (in case they are const_int).
3393 *PUNSIGNEDP nonzero says that the operands are unsigned;
3394 this matters if they need to be widened.
3396 If they have mode BLKmode, then SIZE specifies the size of both operands.
3398 This function performs all the setup necessary so that the caller only has
3399 to emit a single comparison insn. This setup can involve doing a BLKmode
3400 comparison or emitting a library call to perform the comparison if no insn
3401 is available to handle it.
3402 The values which are passed in through pointers can be modified; the caller
3403 should perform the comparison on the modified values. Constant
3404 comparisons must have already been folded. */
3406 static void
3407 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3408 enum machine_mode *pmode, int *punsignedp,
3409 enum can_compare_purpose purpose)
3411 enum machine_mode mode = *pmode;
3412 rtx x = *px, y = *py;
3413 int unsignedp = *punsignedp;
3414 enum mode_class class;
3416 class = GET_MODE_CLASS (mode);
3418 if (mode != BLKmode && flag_force_mem)
3420 /* Load duplicate non-volatile operands once. */
3421 if (rtx_equal_p (x, y) && ! volatile_refs_p (x))
3423 x = force_not_mem (x);
3424 y = x;
3426 else
3428 x = force_not_mem (x);
3429 y = force_not_mem (y);
3433 /* If we are inside an appropriately-short loop and we are optimizing,
3434 force expensive constants into a register. */
3435 if (CONSTANT_P (x) && optimize
3436 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3437 x = force_reg (mode, x);
3439 if (CONSTANT_P (y) && optimize
3440 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3441 y = force_reg (mode, y);
3443 #ifdef HAVE_cc0
3444 /* Make sure we have a canonical comparison. The RTL
3445 documentation states that canonical comparisons are required only
3446 for targets which have cc0. */
3447 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3448 #endif
3450 /* Don't let both operands fail to indicate the mode. */
3451 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3452 x = force_reg (mode, x);
3454 /* Handle all BLKmode compares. */
3456 if (mode == BLKmode)
3458 enum machine_mode cmp_mode, result_mode;
3459 enum insn_code cmp_code;
3460 tree length_type;
3461 rtx libfunc;
3462 rtx result;
3463 rtx opalign
3464 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3466 gcc_assert (size);
3468 /* Try to use a memory block compare insn - either cmpstr
3469 or cmpmem will do. */
3470 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3471 cmp_mode != VOIDmode;
3472 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3474 cmp_code = cmpmem_optab[cmp_mode];
3475 if (cmp_code == CODE_FOR_nothing)
3476 cmp_code = cmpstr_optab[cmp_mode];
3477 if (cmp_code == CODE_FOR_nothing)
3478 continue;
3480 /* Must make sure the size fits the insn's mode. */
3481 if ((GET_CODE (size) == CONST_INT
3482 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3483 || (GET_MODE_BITSIZE (GET_MODE (size))
3484 > GET_MODE_BITSIZE (cmp_mode)))
3485 continue;
3487 result_mode = insn_data[cmp_code].operand[0].mode;
3488 result = gen_reg_rtx (result_mode);
3489 size = convert_to_mode (cmp_mode, size, 1);
3490 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3492 *px = result;
3493 *py = const0_rtx;
3494 *pmode = result_mode;
3495 return;
3498 /* Otherwise call a library function, memcmp. */
3499 libfunc = memcmp_libfunc;
3500 length_type = sizetype;
3501 result_mode = TYPE_MODE (integer_type_node);
3502 cmp_mode = TYPE_MODE (length_type);
3503 size = convert_to_mode (TYPE_MODE (length_type), size,
3504 TYPE_UNSIGNED (length_type));
3506 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3507 result_mode, 3,
3508 XEXP (x, 0), Pmode,
3509 XEXP (y, 0), Pmode,
3510 size, cmp_mode);
3511 *px = result;
3512 *py = const0_rtx;
3513 *pmode = result_mode;
3514 return;
3517 /* Don't allow operands to the compare to trap, as that can put the
3518 compare and branch in different basic blocks. */
3519 if (flag_non_call_exceptions)
3521 if (may_trap_p (x))
3522 x = force_reg (mode, x);
3523 if (may_trap_p (y))
3524 y = force_reg (mode, y);
3527 *px = x;
3528 *py = y;
3529 if (can_compare_p (*pcomparison, mode, purpose))
3530 return;
3532 /* Handle a lib call just for the mode we are using. */
3534 if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
3536 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3537 rtx result;
3539 /* If we want unsigned, and this mode has a distinct unsigned
3540 comparison routine, use that. */
3541 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3542 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3544 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3545 word_mode, 2, x, mode, y, mode);
3547 *px = result;
3548 *pmode = word_mode;
3549 if (TARGET_LIB_INT_CMP_BIASED)
3550 /* Integer comparison returns a result that must be compared
3551 against 1, so that even if we do an unsigned compare
3552 afterward, there is still a value that can represent the
3553 result "less than". */
3554 *py = const1_rtx;
3555 else
3557 *py = const0_rtx;
3558 *punsignedp = 1;
3560 return;
3563 gcc_assert (class == MODE_FLOAT);
3564 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3567 /* Before emitting an insn with code ICODE, make sure that X, which is going
3568 to be used for operand OPNUM of the insn, is converted from mode MODE to
3569 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3570 that it is accepted by the operand predicate. Return the new value. */
3572 static rtx
3573 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3574 enum machine_mode wider_mode, int unsignedp)
3576 if (mode != wider_mode)
3577 x = convert_modes (wider_mode, mode, x, unsignedp);
3579 if (!insn_data[icode].operand[opnum].predicate
3580 (x, insn_data[icode].operand[opnum].mode))
3582 if (no_new_pseudos)
3583 return NULL_RTX;
3584 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3587 return x;
3590 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3591 we can do the comparison.
3592 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3593 be NULL_RTX which indicates that only a comparison is to be generated. */
3595 static void
3596 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3597 enum rtx_code comparison, int unsignedp, rtx label)
3599 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3600 enum mode_class class = GET_MODE_CLASS (mode);
3601 enum machine_mode wider_mode = mode;
3603 /* Try combined insns first. */
3606 enum insn_code icode;
3607 PUT_MODE (test, wider_mode);
3609 if (label)
3611 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3613 if (icode != CODE_FOR_nothing
3614 && insn_data[icode].operand[0].predicate (test, wider_mode))
3616 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3617 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3618 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3619 return;
3623 /* Handle some compares against zero. */
3624 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3625 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3627 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3628 emit_insn (GEN_FCN (icode) (x));
3629 if (label)
3630 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3631 return;
3634 /* Handle compares for which there is a directly suitable insn. */
3636 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3637 if (icode != CODE_FOR_nothing)
3639 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3640 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3641 emit_insn (GEN_FCN (icode) (x, y));
3642 if (label)
3643 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3644 return;
3647 if (class != MODE_INT && class != MODE_FLOAT
3648 && class != MODE_COMPLEX_FLOAT)
3649 break;
3651 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3653 while (wider_mode != VOIDmode);
3655 gcc_unreachable ();
3658 /* Generate code to compare X with Y so that the condition codes are
3659 set and to jump to LABEL if the condition is true. If X is a
3660 constant and Y is not a constant, then the comparison is swapped to
3661 ensure that the comparison RTL has the canonical form.
3663 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3664 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3665 the proper branch condition code.
3667 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3669 MODE is the mode of the inputs (in case they are const_int).
3671 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3672 be passed unchanged to emit_cmp_insn, then potentially converted into an
3673 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3675 void
3676 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3677 enum machine_mode mode, int unsignedp, rtx label)
3679 rtx op0 = x, op1 = y;
3681 /* Swap operands and condition to ensure canonical RTL. */
3682 if (swap_commutative_operands_p (x, y))
3684 /* If we're not emitting a branch, this means some caller
3685 is out of sync. */
3686 gcc_assert (label);
3688 op0 = y, op1 = x;
3689 comparison = swap_condition (comparison);
3692 #ifdef HAVE_cc0
3693 /* If OP0 is still a constant, then both X and Y must be constants.
3694 Force X into a register to create canonical RTL. */
3695 if (CONSTANT_P (op0))
3696 op0 = force_reg (mode, op0);
3697 #endif
3699 if (unsignedp)
3700 comparison = unsigned_condition (comparison);
3702 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3703 ccp_jump);
3704 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
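/* As an illustrative (hypothetical) use from an expander: to emit
   "if (a < b) goto lab" for signed SImode operands A and B, a caller
   would do roughly

       rtx lab = gen_label_rtx ();
       emit_cmp_and_jump_insns (a, b, LT, NULL_RTX, SImode, 0, lab);
       ... emit the code for the fall-through case ...
       emit_label (lab);

   passing NULL_RTX for SIZE since the operands are not BLKmode.  */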
3707 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3709 void
3710 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3711 enum machine_mode mode, int unsignedp)
3713 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3716 /* Emit a library call comparison between floating point X and Y.
3717 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3719 static void
3720 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3721 enum machine_mode *pmode, int *punsignedp)
3723 enum rtx_code comparison = *pcomparison;
3724 enum rtx_code swapped = swap_condition (comparison);
3725 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3726 rtx x = *px;
3727 rtx y = *py;
3728 enum machine_mode orig_mode = GET_MODE (x);
3729 enum machine_mode mode;
3730 rtx value, target, insns, equiv;
3731 rtx libfunc = 0;
3732 bool reversed_p = false;
3734 for (mode = orig_mode; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3736 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3737 break;
3739 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3741 rtx tmp;
3742 tmp = x; x = y; y = tmp;
3743 comparison = swapped;
3744 break;
3747 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3748 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3750 comparison = reversed;
3751 reversed_p = true;
3752 break;
3756 gcc_assert (mode != VOIDmode);
3758 if (mode != orig_mode)
3760 x = convert_to_mode (mode, x, 0);
3761 y = convert_to_mode (mode, y, 0);
3764 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3765 the RTL. This allows the RTL optimizers to delete the libcall if the
3766 condition can be determined at compile-time. */
3767 if (comparison == UNORDERED)
3769 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3770 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3771 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3772 temp, const_true_rtx, equiv);
3774 else
3776 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
3777 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3779 rtx true_rtx, false_rtx;
3781 switch (comparison)
3783 case EQ:
3784 true_rtx = const0_rtx;
3785 false_rtx = const_true_rtx;
3786 break;
3788 case NE:
3789 true_rtx = const_true_rtx;
3790 false_rtx = const0_rtx;
3791 break;
3793 case GT:
3794 true_rtx = const1_rtx;
3795 false_rtx = const0_rtx;
3796 break;
3798 case GE:
3799 true_rtx = const0_rtx;
3800 false_rtx = constm1_rtx;
3801 break;
3803 case LT:
3804 true_rtx = constm1_rtx;
3805 false_rtx = const0_rtx;
3806 break;
3808 case LE:
3809 true_rtx = const0_rtx;
3810 false_rtx = const1_rtx;
3811 break;
3813 default:
3814 gcc_unreachable ();
3816 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3817 equiv, true_rtx, false_rtx);
3821 start_sequence ();
3822 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3823 word_mode, 2, x, mode, y, mode);
3824 insns = get_insns ();
3825 end_sequence ();
3827 target = gen_reg_rtx (word_mode);
3828 emit_libcall_block (insns, target, value, equiv);
3830 if (comparison == UNORDERED
3831 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3832 comparison = reversed_p ? EQ : NE;
3834 *px = target;
3835 *py = const0_rtx;
3836 *pmode = word_mode;
3837 *pcomparison = comparison;
3838 *punsignedp = 0;
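/* In effect, for the common case this rewrites a floating-point test such
   as "a < b" on SFmode into an integer test against zero on the libcall
   result, conceptually

       if (a < b)   ==>   if (__ltsf2 (a, b) < 0)

   (the actual function name is whatever libfunc the optab records for
   this target).  *px becomes the word_mode libcall result, *py becomes
   const0_rtx, and *pcomparison is left as LT, so the caller only needs
   to emit an ordinary integer comparison.  */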
3841 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3843 void
3844 emit_indirect_jump (rtx loc)
3846 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
3847 (loc, Pmode))
3848 loc = copy_to_mode_reg (Pmode, loc);
3850 emit_jump_insn (gen_indirect_jump (loc));
3851 emit_barrier ();
3854 #ifdef HAVE_conditional_move
3856 /* Emit a conditional move instruction if the machine supports one for that
3857 condition and machine mode.
3859 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3860 the mode to use should they be constants. If it is VOIDmode, they cannot
3861 both be constants.
3863 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3864 should be stored there. MODE is the mode to use should they be constants.
3865 If it is VOIDmode, they cannot both be constants.
3867 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3868 is not supported. */
3871 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
3872 enum machine_mode cmode, rtx op2, rtx op3,
3873 enum machine_mode mode, int unsignedp)
3875 rtx tem, subtarget, comparison, insn;
3876 enum insn_code icode;
3877 enum rtx_code reversed;
3879 /* If one operand is constant, make it the second one. Only do this
3880 if the other operand is not constant as well. */
3882 if (swap_commutative_operands_p (op0, op1))
3884 tem = op0;
3885 op0 = op1;
3886 op1 = tem;
3887 code = swap_condition (code);
3890 /* get_condition will prefer to generate LT and GT even if the old
3891 comparison was against zero, so undo that canonicalization here since
3892 comparisons against zero are cheaper. */
3893 if (code == LT && op1 == const1_rtx)
3894 code = LE, op1 = const0_rtx;
3895 else if (code == GT && op1 == constm1_rtx)
3896 code = GE, op1 = const0_rtx;
3898 if (cmode == VOIDmode)
3899 cmode = GET_MODE (op0);
3901 if (swap_commutative_operands_p (op2, op3)
3902 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
3903 != UNKNOWN))
3905 tem = op2;
3906 op2 = op3;
3907 op3 = tem;
3908 code = reversed;
3911 if (mode == VOIDmode)
3912 mode = GET_MODE (op2);
3914 icode = movcc_gen_code[mode];
3916 if (icode == CODE_FOR_nothing)
3917 return 0;
3919 if (flag_force_mem)
3921 op2 = force_not_mem (op2);
3922 op3 = force_not_mem (op3);
3925 if (!target)
3926 target = gen_reg_rtx (mode);
3928 subtarget = target;
3930 /* If the insn doesn't accept these operands, put them in pseudos. */
3932 if (!insn_data[icode].operand[0].predicate
3933 (subtarget, insn_data[icode].operand[0].mode))
3934 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
3936 if (!insn_data[icode].operand[2].predicate
3937 (op2, insn_data[icode].operand[2].mode))
3938 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
3940 if (!insn_data[icode].operand[3].predicate
3941 (op3, insn_data[icode].operand[3].mode))
3942 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
3944 /* Everything should now be in the suitable form, so emit the compare insn
3945 and then the conditional move. */
3947 comparison
3948 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
3950 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3951 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3952 return NULL and let the caller figure out how best to deal with this
3953 situation. */
3954 if (GET_CODE (comparison) != code)
3955 return NULL_RTX;
3957 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
3959 /* If that failed, then give up. */
3960 if (insn == 0)
3961 return 0;
3963 emit_insn (insn);
3965 if (subtarget != target)
3966 convert_move (target, subtarget, 0);
3968 return target;
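/* An illustrative (hypothetical) call: to expand "x = (a > b) ? a : b"
   in SImode as a conditional move, a caller might write

       rtx res = emit_conditional_move (x, GT, a, b, SImode,
                                        a, b, SImode, 0);
       if (res == NULL_RTX)
         ... fall back to a compare-and-branch sequence ...

   and must always be prepared for the NULL_RTX result, since not every
   mode has a movcc pattern.  */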
3971 /* Return nonzero if a conditional move of mode MODE is supported.
3973 This function is for combine so it can tell whether an insn that looks
3974 like a conditional move is actually supported by the hardware. If we
3975 guess wrong we lose a bit on optimization, but that's it. */
3976 /* ??? sparc64 supports conditionally moving integer values based on fp
3977 comparisons, and vice versa. How do we handle them? */
3980 can_conditionally_move_p (enum machine_mode mode)
3982 if (movcc_gen_code[mode] != CODE_FOR_nothing)
3983 return 1;
3985 return 0;
3988 #endif /* HAVE_conditional_move */
3990 /* Emit a conditional addition instruction if the machine supports one for that
3991 condition and machine mode.
3993 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3994 the mode to use should they be constants. If it is VOIDmode, they cannot
3995 both be constants.
3997 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
3998 should be stored there. MODE is the mode to use should they be constants.
3999 If it is VOIDmode, they cannot both be constants.
4001 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4002 is not supported. */
4005 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4006 enum machine_mode cmode, rtx op2, rtx op3,
4007 enum machine_mode mode, int unsignedp)
4009 rtx tem, subtarget, comparison, insn;
4010 enum insn_code icode;
4011 enum rtx_code reversed;
4013 /* If one operand is constant, make it the second one. Only do this
4014 if the other operand is not constant as well. */
4016 if (swap_commutative_operands_p (op0, op1))
4018 tem = op0;
4019 op0 = op1;
4020 op1 = tem;
4021 code = swap_condition (code);
4024 /* get_condition will prefer to generate LT and GT even if the old
4025 comparison was against zero, so undo that canonicalization here since
4026 comparisons against zero are cheaper. */
4027 if (code == LT && op1 == const1_rtx)
4028 code = LE, op1 = const0_rtx;
4029 else if (code == GT && op1 == constm1_rtx)
4030 code = GE, op1 = const0_rtx;
4032 if (cmode == VOIDmode)
4033 cmode = GET_MODE (op0);
4035 if (swap_commutative_operands_p (op2, op3)
4036 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4037 != UNKNOWN))
4039 tem = op2;
4040 op2 = op3;
4041 op3 = tem;
4042 code = reversed;
4045 if (mode == VOIDmode)
4046 mode = GET_MODE (op2);
4048 icode = addcc_optab->handlers[(int) mode].insn_code;
4050 if (icode == CODE_FOR_nothing)
4051 return 0;
4053 if (flag_force_mem)
4055 op2 = force_not_mem (op2);
4056 op3 = force_not_mem (op3);
4059 if (!target)
4060 target = gen_reg_rtx (mode);
4062 /* If the insn doesn't accept these operands, put them in pseudos. */
4064 if (!insn_data[icode].operand[0].predicate
4065 (target, insn_data[icode].operand[0].mode))
4066 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4067 else
4068 subtarget = target;
4070 if (!insn_data[icode].operand[2].predicate
4071 (op2, insn_data[icode].operand[2].mode))
4072 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4074 if (!insn_data[icode].operand[3].predicate
4075 (op3, insn_data[icode].operand[3].mode))
4076 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4078 /* Everything should now be in the suitable form, so emit the compare insn
4079 and then the conditional add. */
4081 comparison
4082 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4084 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4085 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4086 return NULL and let the caller figure out how best to deal with this
4087 situation. */
4088 if (GET_CODE (comparison) != code)
4089 return NULL_RTX;
4091 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4093 /* If that failed, then give up. */
4094 if (insn == 0)
4095 return 0;
4097 emit_insn (insn);
4099 if (subtarget != target)
4100 convert_move (target, subtarget, 0);
4102 return target;
4105 /* These functions attempt to generate an insn body, rather than
4106 emitting the insn, but if the gen function already emits them, we
4107 make no attempt to turn them back into naked patterns. */
4109 /* Generate and return an insn body to add Y to X. */
4112 gen_add2_insn (rtx x, rtx y)
4114 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4116 gcc_assert (insn_data[icode].operand[0].predicate
4117 (x, insn_data[icode].operand[0].mode));
4118 gcc_assert (insn_data[icode].operand[1].predicate
4119 (x, insn_data[icode].operand[1].mode));
4120 gcc_assert (insn_data[icode].operand[2].predicate
4121 (y, insn_data[icode].operand[2].mode));
4123 return GEN_FCN (icode) (x, x, y);
4126 /* Generate and return an insn body to add r1 and c,
4127 storing the result in r0. */
4129 gen_add3_insn (rtx r0, rtx r1, rtx c)
4131 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4133 if (icode == CODE_FOR_nothing
4134 || !(insn_data[icode].operand[0].predicate
4135 (r0, insn_data[icode].operand[0].mode))
4136 || !(insn_data[icode].operand[1].predicate
4137 (r1, insn_data[icode].operand[1].mode))
4138 || !(insn_data[icode].operand[2].predicate
4139 (c, insn_data[icode].operand[2].mode)))
4140 return NULL_RTX;
4142 return GEN_FCN (icode) (r0, r1, c);
4146 have_add2_insn (rtx x, rtx y)
4148 int icode;
4150 gcc_assert (GET_MODE (x) != VOIDmode);
4152 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4154 if (icode == CODE_FOR_nothing)
4155 return 0;
4157 if (!(insn_data[icode].operand[0].predicate
4158 (x, insn_data[icode].operand[0].mode))
4159 || !(insn_data[icode].operand[1].predicate
4160 (x, insn_data[icode].operand[1].mode))
4161 || !(insn_data[icode].operand[2].predicate
4162 (y, insn_data[icode].operand[2].mode)))
4163 return 0;
4165 return 1;
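/* Illustrative (hypothetical) use, e.g. from epilogue or reload-style
   code that must adjust a register in place without creating new pseudos:

       rtx inc = GEN_INT (delta);
       if (have_add2_insn (reg, inc))
         emit_insn (gen_add2_insn (reg, inc));
       else
         ... synthesize the addition another way ...
*/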
4168 /* Generate and return an insn body to subtract Y from X. */
4171 gen_sub2_insn (rtx x, rtx y)
4173 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4175 gcc_assert (insn_data[icode].operand[0].predicate
4176 (x, insn_data[icode].operand[0].mode));
4177 gcc_assert (insn_data[icode].operand[1].predicate
4178 (x, insn_data[icode].operand[1].mode));
4179 gcc_assert (insn_data[icode].operand[2].predicate
4180 (y, insn_data[icode].operand[2].mode));
4182 return GEN_FCN (icode) (x, x, y);
4185 /* Generate and return an insn body to subtract c from r1,
4186 storing the result in r0. */
4188 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4190 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4192 if (icode == CODE_FOR_nothing
4193 || !(insn_data[icode].operand[0].predicate
4194 (r0, insn_data[icode].operand[0].mode))
4195 || !(insn_data[icode].operand[1].predicate
4196 (r1, insn_data[icode].operand[1].mode))
4197 || !(insn_data[icode].operand[2].predicate
4198 (c, insn_data[icode].operand[2].mode)))
4199 return NULL_RTX;
4201 return GEN_FCN (icode) (r0, r1, c);
4205 have_sub2_insn (rtx x, rtx y)
4207 int icode;
4209 gcc_assert (GET_MODE (x) != VOIDmode);
4211 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4213 if (icode == CODE_FOR_nothing)
4214 return 0;
4216 if (!(insn_data[icode].operand[0].predicate
4217 (x, insn_data[icode].operand[0].mode))
4218 || !(insn_data[icode].operand[1].predicate
4219 (x, insn_data[icode].operand[1].mode))
4220 || !(insn_data[icode].operand[2].predicate
4221 (y, insn_data[icode].operand[2].mode)))
4222 return 0;
4224 return 1;
4227 /* Generate the body of an instruction to copy Y into X.
4228 It may be a list of insns, if one insn isn't enough. */
4231 gen_move_insn (rtx x, rtx y)
4233 rtx seq;
4235 start_sequence ();
4236 emit_move_insn_1 (x, y);
4237 seq = get_insns ();
4238 end_sequence ();
4239 return seq;
4242 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4243 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4244 no such operation exists, CODE_FOR_nothing will be returned. */
4246 enum insn_code
4247 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4248 int unsignedp)
4250 convert_optab tab;
4251 #ifdef HAVE_ptr_extend
4252 if (unsignedp < 0)
4253 return CODE_FOR_ptr_extend;
4254 #endif
4256 tab = unsignedp ? zext_optab : sext_optab;
4257 return tab->handlers[to_mode][from_mode].insn_code;
4260 /* Generate the body of an insn to extend Y (with mode MFROM)
4261 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4264 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4265 enum machine_mode mfrom, int unsignedp)
4267 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4268 return GEN_FCN (icode) (x, y);
4271 /* can_fix_p and can_float_p say whether the target machine
4272 can directly convert a given fixed point type to
4273 a given floating point type, or vice versa.
4274 The returned value is the CODE_FOR_... value to use,
4275 or CODE_FOR_nothing if these modes cannot be directly converted.
4277 *TRUNCP_PTR is set to 1 if it is necessary to output
4278 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4280 static enum insn_code
4281 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4282 int unsignedp, int *truncp_ptr)
4284 convert_optab tab;
4285 enum insn_code icode;
4287 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4288 icode = tab->handlers[fixmode][fltmode].insn_code;
4289 if (icode != CODE_FOR_nothing)
4291 *truncp_ptr = 0;
4292 return icode;
4295 /* FIXME: This requires a port to define both FIX and FTRUNC patterns
4296 for this to work. We need to rework the fix* and ftrunc* patterns
4297 and documentation. */
4298 tab = unsignedp ? ufix_optab : sfix_optab;
4299 icode = tab->handlers[fixmode][fltmode].insn_code;
4300 if (icode != CODE_FOR_nothing
4301 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4303 *truncp_ptr = 1;
4304 return icode;
4307 *truncp_ptr = 0;
4308 return CODE_FOR_nothing;
4311 static enum insn_code
4312 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4313 int unsignedp)
4315 convert_optab tab;
4317 tab = unsignedp ? ufloat_optab : sfloat_optab;
4318 return tab->handlers[fltmode][fixmode].insn_code;
4321 /* Generate code to convert FROM to floating point
4322 and store in TO. FROM must be fixed point and not VOIDmode.
4323 UNSIGNEDP nonzero means regard FROM as unsigned.
4324 Normally this is done by correcting the final value
4325 if it is negative. */
4327 void
4328 expand_float (rtx to, rtx from, int unsignedp)
4330 enum insn_code icode;
4331 rtx target = to;
4332 enum machine_mode fmode, imode;
4334 /* Crash now, because we won't be able to decide which mode to use. */
4335 gcc_assert (GET_MODE (from) != VOIDmode);
4337 /* Look for an insn to do the conversion. Do it in the specified
4338 modes if possible; otherwise convert either input, output or both to
4339 wider mode. If the integer mode is wider than the mode of FROM,
4340 we can do the conversion signed even if the input is unsigned. */
4342 for (fmode = GET_MODE (to); fmode != VOIDmode;
4343 fmode = GET_MODE_WIDER_MODE (fmode))
4344 for (imode = GET_MODE (from); imode != VOIDmode;
4345 imode = GET_MODE_WIDER_MODE (imode))
4347 int doing_unsigned = unsignedp;
4349 if (fmode != GET_MODE (to)
4350 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4351 continue;
4353 icode = can_float_p (fmode, imode, unsignedp);
4354 if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
4355 icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;
4357 if (icode != CODE_FOR_nothing)
4359 if (imode != GET_MODE (from))
4360 from = convert_to_mode (imode, from, unsignedp);
4362 if (fmode != GET_MODE (to))
4363 target = gen_reg_rtx (fmode);
4365 emit_unop_insn (icode, target, from,
4366 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4368 if (target != to)
4369 convert_move (to, target, 0);
4370 return;
4374 /* Unsigned integer, and no way to convert directly.
4375 Convert as signed, then conditionally adjust the result. */
4376 if (unsignedp)
4378 rtx label = gen_label_rtx ();
4379 rtx temp;
4380 REAL_VALUE_TYPE offset;
4382 if (flag_force_mem)
4383 from = force_not_mem (from);
4385 /* Look for a usable floating mode FMODE wider than the source and at
4386 least as wide as the target. Using FMODE will avoid rounding woes
4387 with unsigned values greater than the signed maximum value. */
4389 for (fmode = GET_MODE (to); fmode != VOIDmode;
4390 fmode = GET_MODE_WIDER_MODE (fmode))
4391 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4392 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4393 break;
4395 if (fmode == VOIDmode)
4397 /* There is no such mode. Pretend the target is wide enough. */
4398 fmode = GET_MODE (to);
4400 /* Avoid double-rounding when TO is narrower than FROM. */
4401 if ((significand_size (fmode) + 1)
4402 < GET_MODE_BITSIZE (GET_MODE (from)))
4404 rtx temp1;
4405 rtx neglabel = gen_label_rtx ();
4407 /* Don't use TARGET if it isn't a register, is a hard register,
4408 or is the wrong mode. */
4409 if (!REG_P (target)
4410 || REGNO (target) < FIRST_PSEUDO_REGISTER
4411 || GET_MODE (target) != fmode)
4412 target = gen_reg_rtx (fmode);
4414 imode = GET_MODE (from);
4415 do_pending_stack_adjust ();
4417 /* Test whether the sign bit is set. */
4418 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4419 0, neglabel);
4421 /* The sign bit is not set. Convert as signed. */
4422 expand_float (target, from, 0);
4423 emit_jump_insn (gen_jump (label));
4424 emit_barrier ();
4426 /* The sign bit is set.
4427 Convert to a usable (positive signed) value by shifting right
4428 one bit, while remembering if a nonzero bit was shifted
4429 out; i.e., compute (from & 1) | (from >> 1). */
4431 emit_label (neglabel);
4432 temp = expand_binop (imode, and_optab, from, const1_rtx,
4433 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4434 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4435 NULL_RTX, 1);
4436 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4437 OPTAB_LIB_WIDEN);
4438 expand_float (target, temp, 0);
4440 /* Multiply by 2 to undo the shift above. */
4441 temp = expand_binop (fmode, add_optab, target, target,
4442 target, 0, OPTAB_LIB_WIDEN);
4443 if (temp != target)
4444 emit_move_insn (target, temp);
4446 do_pending_stack_adjust ();
4447 emit_label (label);
4448 goto done;
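/* A scalar model of the shift-and-double trick above (a hypothetical
   helper for DImode -> DFmode, not code used by the compiler):

       double u64_to_double (uint64_t u)
       {
         if ((int64_t) u >= 0)
           return (double) (int64_t) u;           /* sign bit clear */
         uint64_t half = (u >> 1) | (u & 1);      /* round to odd   */
         return (double) (int64_t) half * 2.0;    /* undo the shift */
       }

   OR-ing the shifted-out bit back in keeps enough information that the
   final doubling yields the correctly rounded result, which is exactly
   why the sequence computes (from & 1) | (from >> 1) before converting.  */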
4452 /* If we are about to do some arithmetic to correct for an
4453 unsigned operand, do it in a pseudo-register. */
4455 if (GET_MODE (to) != fmode
4456 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4457 target = gen_reg_rtx (fmode);
4459 /* Convert as signed integer to floating. */
4460 expand_float (target, from, 0);
4462 /* If FROM is negative (and therefore TO is negative),
4463 correct its value by 2**bitwidth. */
4465 do_pending_stack_adjust ();
4466 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4467 0, label);
4470 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4471 temp = expand_binop (fmode, add_optab, target,
4472 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4473 target, 0, OPTAB_LIB_WIDEN);
4474 if (temp != target)
4475 emit_move_insn (target, temp);
4477 do_pending_stack_adjust ();
4478 emit_label (label);
4479 goto done;
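/* A scalar model of this simpler correction (hypothetical, for a 32-bit
   FROM converted to double):

       double d = (double) (int32_t) u;   /* convert as signed            */
       if ((int32_t) u < 0)
         d += 4294967296.0;               /* correct by 2**32 if negative */

   which is valid here because FMODE was chosen (or assumed) wide enough
   that no double rounding can occur.  */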
4482 /* No hardware instruction available; call a library routine. */
4484 rtx libfunc;
4485 rtx insns;
4486 rtx value;
4487 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4489 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4490 from = convert_to_mode (SImode, from, unsignedp);
4492 if (flag_force_mem)
4493 from = force_not_mem (from);
4495 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4496 gcc_assert (libfunc);
4498 start_sequence ();
4500 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4501 GET_MODE (to), 1, from,
4502 GET_MODE (from));
4503 insns = get_insns ();
4504 end_sequence ();
4506 emit_libcall_block (insns, target, value,
4507 gen_rtx_FLOAT (GET_MODE (to), from));
4510 done:
4512 /* Copy result to requested destination
4513 if we have been computing in a temp location. */
4515 if (target != to)
4517 if (GET_MODE (target) == GET_MODE (to))
4518 emit_move_insn (to, target);
4519 else
4520 convert_move (to, target, 0);
4524 /* Generate code to convert FROM to fixed point and store in TO. FROM
4525 must be floating point. */
4527 void
4528 expand_fix (rtx to, rtx from, int unsignedp)
4530 enum insn_code icode;
4531 rtx target = to;
4532 enum machine_mode fmode, imode;
4533 int must_trunc = 0;
4535 /* We first try to find a pair of modes, one real and one integer, at
4536 least as wide as FROM and TO, respectively, in which we can open-code
4537 this conversion. If the integer mode is wider than the mode of TO,
4538 we can do the conversion either signed or unsigned. */
4540 for (fmode = GET_MODE (from); fmode != VOIDmode;
4541 fmode = GET_MODE_WIDER_MODE (fmode))
4542 for (imode = GET_MODE (to); imode != VOIDmode;
4543 imode = GET_MODE_WIDER_MODE (imode))
4545 int doing_unsigned = unsignedp;
4547 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4548 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4549 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4551 if (icode != CODE_FOR_nothing)
4553 if (fmode != GET_MODE (from))
4554 from = convert_to_mode (fmode, from, 0);
4556 if (must_trunc)
4558 rtx temp = gen_reg_rtx (GET_MODE (from));
4559 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4560 temp, 0);
4563 if (imode != GET_MODE (to))
4564 target = gen_reg_rtx (imode);
4566 emit_unop_insn (icode, target, from,
4567 doing_unsigned ? UNSIGNED_FIX : FIX);
4568 if (target != to)
4569 convert_move (to, target, unsignedp);
4570 return;
4574 /* For an unsigned conversion, there is one more way to do it.
4575 If we have a signed conversion, we generate code that compares
4576 the real value to the largest representable positive number. If it
4577 is smaller, the conversion is done normally. Otherwise, subtract
4578 one plus the highest signed number, convert, and add it back.
4580 We only need to check all real modes, since we know we didn't find
4581 anything with a wider integer mode.
4583 This code used to extend the FP value into a mode wider than the destination.
4584 This is not needed. Consider, for instance, conversion from SFmode
4585 into DImode.
4587 The hot path through the code deals with inputs smaller than 2^63,
4588 doing just the conversion, so there are no bits to lose.
4590 On the other path we know the value is positive and in the range 2^63..2^64-1
4591 inclusive (for any other input, overflow happens and the result is undefined),
4592 so the most significant bit set in the mantissa corresponds to
4593 2^63. Subtracting 2^63 should not generate any rounding, as it
4594 simply clears out that bit. The rest is trivial. */
4596 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4597 for (fmode = GET_MODE (from); fmode != VOIDmode;
4598 fmode = GET_MODE_WIDER_MODE (fmode))
4599 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4600 &must_trunc))
4602 int bitsize;
4603 REAL_VALUE_TYPE offset;
4604 rtx limit, lab1, lab2, insn;
4606 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4607 real_2expN (&offset, bitsize - 1);
4608 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4609 lab1 = gen_label_rtx ();
4610 lab2 = gen_label_rtx ();
4612 if (flag_force_mem)
4613 from = force_not_mem (from);
4615 if (fmode != GET_MODE (from))
4616 from = convert_to_mode (fmode, from, 0);
4618 /* See if we need to do the subtraction. */
4619 do_pending_stack_adjust ();
4620 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4621 0, lab1);
4623 /* If not, do the signed "fix" and branch around fixup code. */
4624 expand_fix (to, from, 0);
4625 emit_jump_insn (gen_jump (lab2));
4626 emit_barrier ();
4628 /* Otherwise, subtract 2**(N-1), convert to signed number,
4629 then add 2**(N-1). Do the addition using XOR since this
4630 will often generate better code. */
4631 emit_label (lab1);
4632 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4633 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4634 expand_fix (to, target, 0);
4635 target = expand_binop (GET_MODE (to), xor_optab, to,
4636 gen_int_mode
4637 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4638 GET_MODE (to)),
4639 to, 1, OPTAB_LIB_WIDEN);
4641 if (target != to)
4642 emit_move_insn (to, target);
4644 emit_label (lab2);
4646 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4647 != CODE_FOR_nothing)
4649 /* Make a place for a REG_NOTE and add it. */
4650 insn = emit_move_insn (to, to);
4651 set_unique_reg_note (insn,
4652 REG_EQUAL,
4653 gen_rtx_fmt_e (UNSIGNED_FIX,
4654 GET_MODE (to),
4655 copy_rtx (from)));
4658 return;
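/* A scalar model of the sequence above (hypothetical helper for
   DFmode -> 64-bit unsigned, not code used by the compiler):

       uint64_t double_to_u64 (double d)
       {
         const double two63 = 9223372036854775808.0;   /* 2**63 */
         if (d < two63)
           return (uint64_t) (int64_t) d;              /* signed fix suffices */
         return (uint64_t) (int64_t) (d - two63)
                ^ ((uint64_t) 1 << 63);                /* add 2**63 back via XOR */
       }
*/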
4661 /* We can't do it with an insn, so use a library call. But first ensure
4662 that the mode of TO is at least as wide as SImode, since those are the
4663 only library calls we know about. */
4665 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4667 target = gen_reg_rtx (SImode);
4669 expand_fix (target, from, unsignedp);
4671 else
4673 rtx insns;
4674 rtx value;
4675 rtx libfunc;
4677 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4678 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4679 gcc_assert (libfunc);
4681 if (flag_force_mem)
4682 from = force_not_mem (from);
4684 start_sequence ();
4686 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4687 GET_MODE (to), 1, from,
4688 GET_MODE (from));
4689 insns = get_insns ();
4690 end_sequence ();
4692 emit_libcall_block (insns, target, value,
4693 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4694 GET_MODE (to), from));
4697 if (target != to)
4699 if (GET_MODE (to) == GET_MODE (target))
4700 emit_move_insn (to, target);
4701 else
4702 convert_move (to, target, 0);
4706 /* Report whether we have an instruction to perform the operation
4707 specified by CODE on operands of mode MODE. */
4709 have_insn_for (enum rtx_code code, enum machine_mode mode)
4711 return (code_to_optab[(int) code] != 0
4712 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4713 != CODE_FOR_nothing));
4716 /* Create a blank optab. */
4717 static optab
4718 new_optab (void)
4720 int i;
4721 optab op = ggc_alloc (sizeof (struct optab));
4722 for (i = 0; i < NUM_MACHINE_MODES; i++)
4724 op->handlers[i].insn_code = CODE_FOR_nothing;
4725 op->handlers[i].libfunc = 0;
4728 return op;
4731 static convert_optab
4732 new_convert_optab (void)
4734 int i, j;
4735 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4736 for (i = 0; i < NUM_MACHINE_MODES; i++)
4737 for (j = 0; j < NUM_MACHINE_MODES; j++)
4739 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4740 op->handlers[i][j].libfunc = 0;
4742 return op;
4745 /* Same, but fill in its code as CODE, and write it into the
4746 code_to_optab table. */
4747 static inline optab
4748 init_optab (enum rtx_code code)
4750 optab op = new_optab ();
4751 op->code = code;
4752 code_to_optab[(int) code] = op;
4753 return op;
4756 /* Same, but fill in its code as CODE, and do _not_ write it into
4757 the code_to_optab table. */
4758 static inline optab
4759 init_optabv (enum rtx_code code)
4761 optab op = new_optab ();
4762 op->code = code;
4763 return op;
4766 /* Conversion optabs never go in the code_to_optab table. */
4767 static inline convert_optab
4768 init_convert_optab (enum rtx_code code)
4770 convert_optab op = new_convert_optab ();
4771 op->code = code;
4772 return op;
4775 /* Initialize the libfunc fields of an entire group of entries in some
4776 optab. Each entry is set equal to a string consisting of a leading
4777 pair of underscores followed by a generic operation name followed by
4778 a mode name (downshifted to lowercase) followed by a single character
4779 representing the number of operands for the given operation (which is
4780 usually one of the characters '2', '3', or '4').
4782 OPTABLE is the table in which libfunc fields are to be initialized.
4783 FIRST_MODE is the first machine mode index in the given optab to
4784 initialize.
4785 LAST_MODE is the last machine mode index in the given optab to
4786 initialize.
4787 OPNAME is the generic (string) name of the operation.
4788 SUFFIX is the character which specifies the number of operands for
4789 the given generic operation.
4792 static void
4793 init_libfuncs (optab optable, int first_mode, int last_mode,
4794 const char *opname, int suffix)
4796 int mode;
4797 unsigned opname_len = strlen (opname);
4799 for (mode = first_mode; (int) mode <= (int) last_mode;
4800 mode = (enum machine_mode) ((int) mode + 1))
4802 const char *mname = GET_MODE_NAME (mode);
4803 unsigned mname_len = strlen (mname);
4804 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
4805 char *p;
4806 const char *q;
4808 p = libfunc_name;
4809 *p++ = '_';
4810 *p++ = '_';
4811 for (q = opname; *q; )
4812 *p++ = *q++;
4813 for (q = mname; *q; q++)
4814 *p++ = TOLOWER (*q);
4815 *p++ = suffix;
4816 *p = '\0';
4818 optable->handlers[(int) mode].libfunc
4819 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
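/* For example, on a typical 32-bit target the integral group below covers
   SImode and DImode, so applying this to add_optab with suffix '3'
   produces the familiar libgcc names "__addsi3" and "__adddi3"; the
   floating-point group yields "__addsf3", "__adddf3", and so on.  */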
4823 /* Initialize the libfunc fields of an entire group of entries in some
4824 optab which correspond to all integer mode operations. The parameters
4825 have the same meaning as similarly named ones for the `init_libfuncs'
4826 routine. (See above). */
4828 static void
4829 init_integral_libfuncs (optab optable, const char *opname, int suffix)
4831 int maxsize = 2*BITS_PER_WORD;
4832 if (maxsize < LONG_LONG_TYPE_SIZE)
4833 maxsize = LONG_LONG_TYPE_SIZE;
4834 init_libfuncs (optable, word_mode,
4835 mode_for_size (maxsize, MODE_INT, 0),
4836 opname, suffix);
4839 /* Initialize the libfunc fields of an entire group of entries in some
4840 optab which correspond to all real mode operations. The parameters
4841 have the same meaning as similarly named ones for the `init_libfuncs'
4842 routine. (See above). */
4844 static void
4845 init_floating_libfuncs (optab optable, const char *opname, int suffix)
4847 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
4850 /* Initialize the libfunc fields of an entire group of entries of an
4851 inter-mode-class conversion optab. The string formation rules are
4852 similar to the ones for init_libfuncs, above, but instead of having
4853 a mode name and an operand count these functions have two mode names
4854 and no operand count. */
4855 static void
4856 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
4857 enum mode_class from_class,
4858 enum mode_class to_class)
4860 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
4861 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
4862 size_t opname_len = strlen (opname);
4863 size_t max_mname_len = 0;
4865 enum machine_mode fmode, tmode;
4866 const char *fname, *tname;
4867 const char *q;
4868 char *libfunc_name, *suffix;
4869 char *p;
4871 for (fmode = first_from_mode;
4872 fmode != VOIDmode;
4873 fmode = GET_MODE_WIDER_MODE (fmode))
4874 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
4876 for (tmode = first_to_mode;
4877 tmode != VOIDmode;
4878 tmode = GET_MODE_WIDER_MODE (tmode))
4879 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
4881 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4882 libfunc_name[0] = '_';
4883 libfunc_name[1] = '_';
4884 memcpy (&libfunc_name[2], opname, opname_len);
4885 suffix = libfunc_name + opname_len + 2;
4887 for (fmode = first_from_mode; fmode != VOIDmode;
4888 fmode = GET_MODE_WIDER_MODE (fmode))
4889 for (tmode = first_to_mode; tmode != VOIDmode;
4890 tmode = GET_MODE_WIDER_MODE (tmode))
4892 fname = GET_MODE_NAME (fmode);
4893 tname = GET_MODE_NAME (tmode);
4895 p = suffix;
4896 for (q = fname; *q; p++, q++)
4897 *p = TOLOWER (*q);
4898 for (q = tname; *q; p++, q++)
4899 *p = TOLOWER (*q);
4901 *p = '\0';
4903 tab->handlers[tmode][fmode].libfunc
4904 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4905 p - libfunc_name));
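/* For example, the calls below of the form
   init_interclass_conv_libfuncs (sfloat_optab, "float", MODE_INT, MODE_FLOAT)
   record names such as "__floatsisf" and "__floatdidf", while the "fix"
   and "fixuns" groups yield "__fixsfsi", "__fixunsdfdi", and so on.  */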
4909 /* Initialize the libfunc fields of an entire group of entries of an
4910 intra-mode-class conversion optab. The string formation rules are
4911 similar to the ones for init_libfuncs, above. WIDENING says whether
4912 the optab goes from narrow to wide modes or vice versa. These functions
4913 have two mode names _and_ an operand count. */
4914 static void
4915 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
4916 enum mode_class class, bool widening)
4918 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
4919 size_t opname_len = strlen (opname);
4920 size_t max_mname_len = 0;
4922 enum machine_mode nmode, wmode;
4923 const char *nname, *wname;
4924 const char *q;
4925 char *libfunc_name, *suffix;
4926 char *p;
4928 for (nmode = first_mode; nmode != VOIDmode;
4929 nmode = GET_MODE_WIDER_MODE (nmode))
4930 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
4932 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4933 libfunc_name[0] = '_';
4934 libfunc_name[1] = '_';
4935 memcpy (&libfunc_name[2], opname, opname_len);
4936 suffix = libfunc_name + opname_len + 2;
4938 for (nmode = first_mode; nmode != VOIDmode;
4939 nmode = GET_MODE_WIDER_MODE (nmode))
4940 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
4941 wmode = GET_MODE_WIDER_MODE (wmode))
4943 nname = GET_MODE_NAME (nmode);
4944 wname = GET_MODE_NAME (wmode);
4946 p = suffix;
4947 for (q = widening ? nname : wname; *q; p++, q++)
4948 *p = TOLOWER (*q);
4949 for (q = widening ? wname : nname; *q; p++, q++)
4950 *p = TOLOWER (*q);
4952 *p++ = '2';
4953 *p = '\0';
4955 tab->handlers[widening ? wmode : nmode]
4956 [widening ? nmode : wmode].libfunc
4957 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4958 p - libfunc_name));
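/* For example, the widening use below for sext_optab produces names such
   as "__extendsfdf2", while the narrowing trunc_optab case produces
   "__truncdfsf2".  */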
4964 init_one_libfunc (const char *name)
4966 rtx symbol;
4968 /* Create a FUNCTION_DECL that can be passed to
4969 targetm.encode_section_info. */
4970 /* ??? We don't have any type information except that this is
4971 a function. Pretend this is "int foo()". */
4972 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
4973 build_function_type (integer_type_node, NULL_TREE));
4974 DECL_ARTIFICIAL (decl) = 1;
4975 DECL_EXTERNAL (decl) = 1;
4976 TREE_PUBLIC (decl) = 1;
4978 symbol = XEXP (DECL_RTL (decl), 0);
4980 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4981 are the flags assigned by targetm.encode_section_info. */
4982 SYMBOL_REF_DECL (symbol) = 0;
4984 return symbol;
4987 /* Call this to reset the function entry for one optab (OPTABLE) in mode
4988 MODE to NAME, which should be either 0 or a string constant. */
4989 void
4990 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
4992 if (name)
4993 optable->handlers[mode].libfunc = init_one_libfunc (name);
4994 else
4995 optable->handlers[mode].libfunc = 0;
4998 /* Call this to reset the function entry for one conversion optab
4999 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5000 either 0 or a string constant. */
5001 void
5002 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5003 enum machine_mode fmode, const char *name)
5005 if (name)
5006 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5007 else
5008 optable->handlers[tmode][fmode].libfunc = 0;
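/* As an illustration, a target's TARGET_INIT_LIBFUNCS hook uses these to
   rename entries; the ARM EABI configuration, for instance, does roughly

       set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
       set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");

   (shown here only as an example of the interface).  */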
5011 /* Call this once to initialize the contents of the optabs
5012 appropriately for the current target machine. */
5014 void
5015 init_optabs (void)
5017 unsigned int i;
5019 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5021 for (i = 0; i < NUM_RTX_CODE; i++)
5022 setcc_gen_code[i] = CODE_FOR_nothing;
5024 #ifdef HAVE_conditional_move
5025 for (i = 0; i < NUM_MACHINE_MODES; i++)
5026 movcc_gen_code[i] = CODE_FOR_nothing;
5027 #endif
5029 for (i = 0; i < NUM_MACHINE_MODES; i++)
5031 vcond_gen_code[i] = CODE_FOR_nothing;
5032 vcondu_gen_code[i] = CODE_FOR_nothing;
5035 add_optab = init_optab (PLUS);
5036 addv_optab = init_optabv (PLUS);
5037 sub_optab = init_optab (MINUS);
5038 subv_optab = init_optabv (MINUS);
5039 smul_optab = init_optab (MULT);
5040 smulv_optab = init_optabv (MULT);
5041 smul_highpart_optab = init_optab (UNKNOWN);
5042 umul_highpart_optab = init_optab (UNKNOWN);
5043 smul_widen_optab = init_optab (UNKNOWN);
5044 umul_widen_optab = init_optab (UNKNOWN);
5045 sdiv_optab = init_optab (DIV);
5046 sdivv_optab = init_optabv (DIV);
5047 sdivmod_optab = init_optab (UNKNOWN);
5048 udiv_optab = init_optab (UDIV);
5049 udivmod_optab = init_optab (UNKNOWN);
5050 smod_optab = init_optab (MOD);
5051 umod_optab = init_optab (UMOD);
5052 fmod_optab = init_optab (UNKNOWN);
5053 drem_optab = init_optab (UNKNOWN);
5054 ftrunc_optab = init_optab (UNKNOWN);
5055 and_optab = init_optab (AND);
5056 ior_optab = init_optab (IOR);
5057 xor_optab = init_optab (XOR);
5058 ashl_optab = init_optab (ASHIFT);
5059 ashr_optab = init_optab (ASHIFTRT);
5060 lshr_optab = init_optab (LSHIFTRT);
5061 rotl_optab = init_optab (ROTATE);
5062 rotr_optab = init_optab (ROTATERT);
5063 smin_optab = init_optab (SMIN);
5064 smax_optab = init_optab (SMAX);
5065 umin_optab = init_optab (UMIN);
5066 umax_optab = init_optab (UMAX);
5067 pow_optab = init_optab (UNKNOWN);
5068 atan2_optab = init_optab (UNKNOWN);
5070 /* These three have codes assigned exclusively for the sake of
5071 have_insn_for. */
5072 mov_optab = init_optab (SET);
5073 movstrict_optab = init_optab (STRICT_LOW_PART);
5074 cmp_optab = init_optab (COMPARE);
5076 ucmp_optab = init_optab (UNKNOWN);
5077 tst_optab = init_optab (UNKNOWN);
5079 eq_optab = init_optab (EQ);
5080 ne_optab = init_optab (NE);
5081 gt_optab = init_optab (GT);
5082 ge_optab = init_optab (GE);
5083 lt_optab = init_optab (LT);
5084 le_optab = init_optab (LE);
5085 unord_optab = init_optab (UNORDERED);
5087 neg_optab = init_optab (NEG);
5088 negv_optab = init_optabv (NEG);
5089 abs_optab = init_optab (ABS);
5090 absv_optab = init_optabv (ABS);
5091 addcc_optab = init_optab (UNKNOWN);
5092 one_cmpl_optab = init_optab (NOT);
5093 ffs_optab = init_optab (FFS);
5094 clz_optab = init_optab (CLZ);
5095 ctz_optab = init_optab (CTZ);
5096 popcount_optab = init_optab (POPCOUNT);
5097 parity_optab = init_optab (PARITY);
5098 sqrt_optab = init_optab (SQRT);
5099 floor_optab = init_optab (UNKNOWN);
5100 lfloor_optab = init_optab (UNKNOWN);
5101 ceil_optab = init_optab (UNKNOWN);
5102 lceil_optab = init_optab (UNKNOWN);
5103 round_optab = init_optab (UNKNOWN);
5104 btrunc_optab = init_optab (UNKNOWN);
5105 nearbyint_optab = init_optab (UNKNOWN);
5106 rint_optab = init_optab (UNKNOWN);
5107 lrint_optab = init_optab (UNKNOWN);
5108 sincos_optab = init_optab (UNKNOWN);
5109 sin_optab = init_optab (UNKNOWN);
5110 asin_optab = init_optab (UNKNOWN);
5111 cos_optab = init_optab (UNKNOWN);
5112 acos_optab = init_optab (UNKNOWN);
5113 exp_optab = init_optab (UNKNOWN);
5114 exp10_optab = init_optab (UNKNOWN);
5115 exp2_optab = init_optab (UNKNOWN);
5116 expm1_optab = init_optab (UNKNOWN);
5117 ldexp_optab = init_optab (UNKNOWN);
5118 logb_optab = init_optab (UNKNOWN);
5119 ilogb_optab = init_optab (UNKNOWN);
5120 log_optab = init_optab (UNKNOWN);
5121 log10_optab = init_optab (UNKNOWN);
5122 log2_optab = init_optab (UNKNOWN);
5123 log1p_optab = init_optab (UNKNOWN);
5124 tan_optab = init_optab (UNKNOWN);
5125 atan_optab = init_optab (UNKNOWN);
5126 copysign_optab = init_optab (UNKNOWN);
5128 strlen_optab = init_optab (UNKNOWN);
5129 cbranch_optab = init_optab (UNKNOWN);
5130 cmov_optab = init_optab (UNKNOWN);
5131 cstore_optab = init_optab (UNKNOWN);
5132 push_optab = init_optab (UNKNOWN);
5134 reduc_smax_optab = init_optab (UNKNOWN);
5135 reduc_umax_optab = init_optab (UNKNOWN);
5136 reduc_smin_optab = init_optab (UNKNOWN);
5137 reduc_umin_optab = init_optab (UNKNOWN);
5138 reduc_splus_optab = init_optab (UNKNOWN);
5139 reduc_uplus_optab = init_optab (UNKNOWN);
5141 vec_extract_optab = init_optab (UNKNOWN);
5142 vec_set_optab = init_optab (UNKNOWN);
5143 vec_init_optab = init_optab (UNKNOWN);
5144 vec_shl_optab = init_optab (UNKNOWN);
5145 vec_shr_optab = init_optab (UNKNOWN);
5146 vec_realign_load_optab = init_optab (UNKNOWN);
5147 movmisalign_optab = init_optab (UNKNOWN);
5149 powi_optab = init_optab (UNKNOWN);
5151 /* Conversions. */
5152 sext_optab = init_convert_optab (SIGN_EXTEND);
5153 zext_optab = init_convert_optab (ZERO_EXTEND);
5154 trunc_optab = init_convert_optab (TRUNCATE);
5155 sfix_optab = init_convert_optab (FIX);
5156 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5157 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5158 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5159 sfloat_optab = init_convert_optab (FLOAT);
5160 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5162 for (i = 0; i < NUM_MACHINE_MODES; i++)
5164 movmem_optab[i] = CODE_FOR_nothing;
5165 clrmem_optab[i] = CODE_FOR_nothing;
5166 cmpstr_optab[i] = CODE_FOR_nothing;
5167 cmpmem_optab[i] = CODE_FOR_nothing;
5169 sync_add_optab[i] = CODE_FOR_nothing;
5170 sync_sub_optab[i] = CODE_FOR_nothing;
5171 sync_ior_optab[i] = CODE_FOR_nothing;
5172 sync_and_optab[i] = CODE_FOR_nothing;
5173 sync_xor_optab[i] = CODE_FOR_nothing;
5174 sync_nand_optab[i] = CODE_FOR_nothing;
5175 sync_old_add_optab[i] = CODE_FOR_nothing;
5176 sync_old_sub_optab[i] = CODE_FOR_nothing;
5177 sync_old_ior_optab[i] = CODE_FOR_nothing;
5178 sync_old_and_optab[i] = CODE_FOR_nothing;
5179 sync_old_xor_optab[i] = CODE_FOR_nothing;
5180 sync_old_nand_optab[i] = CODE_FOR_nothing;
5181 sync_new_add_optab[i] = CODE_FOR_nothing;
5182 sync_new_sub_optab[i] = CODE_FOR_nothing;
5183 sync_new_ior_optab[i] = CODE_FOR_nothing;
5184 sync_new_and_optab[i] = CODE_FOR_nothing;
5185 sync_new_xor_optab[i] = CODE_FOR_nothing;
5186 sync_new_nand_optab[i] = CODE_FOR_nothing;
5187 sync_compare_and_swap[i] = CODE_FOR_nothing;
5188 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5189 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5190 sync_lock_release[i] = CODE_FOR_nothing;
5192 #ifdef HAVE_SECONDARY_RELOADS
5193 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5194 #endif
5197 /* Fill in the optabs with the insns we support. */
5198 init_all_optabs ();
5200 /* Initialize the optabs with the names of the library functions. */
5201 init_integral_libfuncs (add_optab, "add", '3');
5202 init_floating_libfuncs (add_optab, "add", '3');
5203 init_integral_libfuncs (addv_optab, "addv", '3');
5204 init_floating_libfuncs (addv_optab, "add", '3');
5205 init_integral_libfuncs (sub_optab, "sub", '3');
5206 init_floating_libfuncs (sub_optab, "sub", '3');
5207 init_integral_libfuncs (subv_optab, "subv", '3');
5208 init_floating_libfuncs (subv_optab, "sub", '3');
5209 init_integral_libfuncs (smul_optab, "mul", '3');
5210 init_floating_libfuncs (smul_optab, "mul", '3');
5211 init_integral_libfuncs (smulv_optab, "mulv", '3');
5212 init_floating_libfuncs (smulv_optab, "mul", '3');
5213 init_integral_libfuncs (sdiv_optab, "div", '3');
5214 init_floating_libfuncs (sdiv_optab, "div", '3');
5215 init_integral_libfuncs (sdivv_optab, "divv", '3');
5216 init_integral_libfuncs (udiv_optab, "udiv", '3');
5217 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5218 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5219 init_integral_libfuncs (smod_optab, "mod", '3');
5220 init_integral_libfuncs (umod_optab, "umod", '3');
5221 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5222 init_integral_libfuncs (and_optab, "and", '3');
5223 init_integral_libfuncs (ior_optab, "ior", '3');
5224 init_integral_libfuncs (xor_optab, "xor", '3');
5225 init_integral_libfuncs (ashl_optab, "ashl", '3');
5226 init_integral_libfuncs (ashr_optab, "ashr", '3');
5227 init_integral_libfuncs (lshr_optab, "lshr", '3');
5228 init_integral_libfuncs (smin_optab, "min", '3');
5229 init_floating_libfuncs (smin_optab, "min", '3');
5230 init_integral_libfuncs (smax_optab, "max", '3');
5231 init_floating_libfuncs (smax_optab, "max", '3');
5232 init_integral_libfuncs (umin_optab, "umin", '3');
5233 init_integral_libfuncs (umax_optab, "umax", '3');
5234 init_integral_libfuncs (neg_optab, "neg", '2');
5235 init_floating_libfuncs (neg_optab, "neg", '2');
5236 init_integral_libfuncs (negv_optab, "negv", '2');
5237 init_floating_libfuncs (negv_optab, "neg", '2');
5238 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5239 init_integral_libfuncs (ffs_optab, "ffs", '2');
5240 init_integral_libfuncs (clz_optab, "clz", '2');
5241 init_integral_libfuncs (ctz_optab, "ctz", '2');
5242 init_integral_libfuncs (popcount_optab, "popcount", '2');
5243 init_integral_libfuncs (parity_optab, "parity", '2');
5245 /* Comparison libcalls for integers MUST come in pairs,
5246 signed/unsigned. */
5247 init_integral_libfuncs (cmp_optab, "cmp", '2');
5248 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5249 init_floating_libfuncs (cmp_optab, "cmp", '2');
5251 /* EQ etc are floating point only. */
5252 init_floating_libfuncs (eq_optab, "eq", '2');
5253 init_floating_libfuncs (ne_optab, "ne", '2');
5254 init_floating_libfuncs (gt_optab, "gt", '2');
5255 init_floating_libfuncs (ge_optab, "ge", '2');
5256 init_floating_libfuncs (lt_optab, "lt", '2');
5257 init_floating_libfuncs (le_optab, "le", '2');
5258 init_floating_libfuncs (unord_optab, "unord", '2');
5260 init_floating_libfuncs (powi_optab, "powi", '2');
5262 /* Conversions. */
5263 init_interclass_conv_libfuncs (sfloat_optab, "float",
5264 MODE_INT, MODE_FLOAT);
5265 init_interclass_conv_libfuncs (sfix_optab, "fix",
5266 MODE_FLOAT, MODE_INT);
5267 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5268 MODE_FLOAT, MODE_INT);
5270 /* sext_optab is also used for FLOAT_EXTEND. */
5271 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5272 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5274 /* Use cabs for double complex abs, since systems generally have cabs.
5275 Don't define any libcall for float complex, so that cabs will be used. */
5276 if (complex_double_type_node)
5277 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5278 = init_one_libfunc ("cabs");
5280 /* The ffs function operates on `int'. */
5281 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5282 = init_one_libfunc ("ffs");
5284 abort_libfunc = init_one_libfunc ("abort");
5285 memcpy_libfunc = init_one_libfunc ("memcpy");
5286 memmove_libfunc = init_one_libfunc ("memmove");
5287 memcmp_libfunc = init_one_libfunc ("memcmp");
5288 memset_libfunc = init_one_libfunc ("memset");
5289 setbits_libfunc = init_one_libfunc ("__setbits");
5291 #ifndef DONT_USE_BUILTIN_SETJMP
5292 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5293 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5294 #else
5295 setjmp_libfunc = init_one_libfunc ("setjmp");
5296 longjmp_libfunc = init_one_libfunc ("longjmp");
5297 #endif
5298 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5299 unwind_sjlj_unregister_libfunc
5300 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5302 /* For function entry/exit instrumentation. */
5303 profile_function_entry_libfunc
5304 = init_one_libfunc ("__cyg_profile_func_enter");
5305 profile_function_exit_libfunc
5306 = init_one_libfunc ("__cyg_profile_func_exit");
5308 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5310 if (HAVE_conditional_trap)
5311 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5313 /* Allow the target to add more libcalls or rename some, etc. */
5314 targetm.init_libfuncs ();
5317 #ifdef DEBUG
5319 /* Print information about the current contents of the optabs on
5320 STDERR. */
5322 static void
5323 debug_optab_libfuncs (void)
5325 int i;
5326 int j;
5327 int k;
5329 /* Dump the arithmetic optabs. */
5330 for (i = 0; i != (int) OTI_MAX; i++)
5331 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5333 optab o;
5334 struct optab_handlers *h;
5336 o = optab_table[i];
5337 h = &o->handlers[j];
5338 if (h->libfunc)
5340 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5341 fprintf (stderr, "%s\t%s:\t%s\n",
5342 GET_RTX_NAME (o->code),
5343 GET_MODE_NAME (j),
5344 XSTR (h->libfunc, 0));
5348 /* Dump the conversion optabs. */
5349 for (i = 0; i < (int) CTI_MAX; ++i)
5350 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5351 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5353 convert_optab o;
5354 struct optab_handlers *h;
5356 o = convert_optab_table[i];
5357 h = &o->handlers[j][k];
5358 if (h->libfunc)
5360 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5361 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5362 GET_RTX_NAME (o->code),
5363 GET_MODE_NAME (j),
5364 GET_MODE_NAME (k),
5365 XSTR (h->libfunc, 0));
5370 #endif /* DEBUG */
5373 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5374 CODE. Return 0 on failure. */
5377 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5378 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5380 enum machine_mode mode = GET_MODE (op1);
5381 enum insn_code icode;
5382 rtx insn;
5384 if (!HAVE_conditional_trap)
5385 return 0;
5387 if (mode == VOIDmode)
5388 return 0;
5390 icode = cmp_optab->handlers[(int) mode].insn_code;
5391 if (icode == CODE_FOR_nothing)
5392 return 0;
5394 start_sequence ();
5395 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5396 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5397 if (!op1 || !op2)
5399 end_sequence ();
5400 return 0;
5402 emit_insn (GEN_FCN (icode) (op1, op2));
5404 PUT_CODE (trap_rtx, code);
5405 gcc_assert (HAVE_conditional_trap);
5406 insn = gen_conditional_trap (trap_rtx, tcode);
5407 if (insn)
5409 emit_insn (insn);
5410 insn = get_insns ();
5412 end_sequence ();
5414 return insn;
5417 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5418 or unsigned operation code. */
5420 static enum rtx_code
5421 get_rtx_code (enum tree_code tcode, bool unsignedp)
5423 enum rtx_code code;
5424 switch (tcode)
5426 case EQ_EXPR:
5427 code = EQ;
5428 break;
5429 case NE_EXPR:
5430 code = NE;
5431 break;
5432 case LT_EXPR:
5433 code = unsignedp ? LTU : LT;
5434 break;
5435 case LE_EXPR:
5436 code = unsignedp ? LEU : LE;
5437 break;
5438 case GT_EXPR:
5439 code = unsignedp ? GTU : GT;
5440 break;
5441 case GE_EXPR:
5442 code = unsignedp ? GEU : GE;
5443 break;
5445 case UNORDERED_EXPR:
5446 code = UNORDERED;
5447 break;
5448 case ORDERED_EXPR:
5449 code = ORDERED;
5450 break;
5451 case UNLT_EXPR:
5452 code = UNLT;
5453 break;
5454 case UNLE_EXPR:
5455 code = UNLE;
5456 break;
5457 case UNGT_EXPR:
5458 code = UNGT;
5459 break;
5460 case UNGE_EXPR:
5461 code = UNGE;
5462 break;
5463 case UNEQ_EXPR:
5464 code = UNEQ;
5465 break;
5466 case LTGT_EXPR:
5467 code = LTGT;
5468 break;
5470 default:
5471 gcc_unreachable ();
5473 return code;
5476 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5477 unsigned operators. Do not generate a compare instruction. */
5479 static rtx
5480 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5482 enum rtx_code rcode;
5483 tree t_op0, t_op1;
5484 rtx rtx_op0, rtx_op1;
5486 /* This is unlikely to fail. While generating VEC_COND_EXPR, the auto-vectorizer
5487 ensures that the condition is a relational operation. */
5488 gcc_assert (COMPARISON_CLASS_P (cond));
5490 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5491 t_op0 = TREE_OPERAND (cond, 0);
5492 t_op1 = TREE_OPERAND (cond, 1);
5494 /* Expand operands. */
5495 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5496 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
5498 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5499 && GET_MODE (rtx_op0) != VOIDmode)
5500 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5502 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5503 && GET_MODE (rtx_op1) != VOIDmode)
5504 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5506 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5509 /* Return insn code for VEC_COND_EXPR EXPR. */
5511 static inline enum insn_code
5512 get_vcond_icode (tree expr, enum machine_mode mode)
5514 enum insn_code icode = CODE_FOR_nothing;
5516 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5517 icode = vcondu_gen_code[mode];
5518 else
5519 icode = vcond_gen_code[mode];
5520 return icode;
5523 /* Return TRUE iff appropriate vector insns are available
5524 for the vector cond expr EXPR in mode VMODE. */
5526 bool
5527 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5529 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5530 return false;
5531 return true;
5534 /* Generate insns for VEC_COND_EXPR. */
5537 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5539 enum insn_code icode;
5540 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5541 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5542 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5544 icode = get_vcond_icode (vec_cond_expr, mode);
5545 if (icode == CODE_FOR_nothing)
5546 return 0;
5548 if (!target)
5549 target = gen_reg_rtx (mode);
5551 /* Get comparison rtx. First expand both cond expr operands. */
5552 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5553 unsignedp, icode);
5554 cc_op0 = XEXP (comparison, 0);
5555 cc_op1 = XEXP (comparison, 1);
5556 /* Expand both operands and force them into registers, if required. */
5557 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5558 NULL_RTX, VOIDmode, 1);
5559 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5560 && mode != VOIDmode)
5561 rtx_op1 = force_reg (mode, rtx_op1);
5563 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5564 NULL_RTX, VOIDmode, 1);
5565 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5566 && mode != VOIDmode)
5567 rtx_op2 = force_reg (mode, rtx_op2);
5569 /* Emit instruction! */
5570 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5571 comparison, cc_op0, cc_op1));
5573 return target;
5574 }
5577 /* This is an internal subroutine of the other compare_and_swap expanders.
5578 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5579 operation. TARGET is an optional place to store the value result of
5580 the operation. ICODE is the particular instruction to expand. Return
5581 the result of the operation. */
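/* Ignoring atomicity, the generated operation behaves roughly like

     tmp = *mem; if (tmp == old_val) *mem = new_val; return tmp;

   i.e. the value returned is always the prior contents of MEM, whether
   or not the store took place.  Illustrative sketch only.  */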
5583 static rtx
5584 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5585 rtx target, enum insn_code icode)
5586 {
5587 enum machine_mode mode = GET_MODE (mem);
5588 rtx insn;
5590 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5591 target = gen_reg_rtx (mode);
5593 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5594 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5595 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5596 old_val = force_reg (mode, old_val);
5598 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5599 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5600 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5601 new_val = force_reg (mode, new_val);
5603 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5604 if (insn == NULL_RTX)
5605 return NULL_RTX;
5606 emit_insn (insn);
5608 return target;
5609 }
5611 /* Expand a compare-and-swap operation and return its value. */
5613 rtx
5614 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5615 {
5616 enum machine_mode mode = GET_MODE (mem);
5617 enum insn_code icode = sync_compare_and_swap[mode];
5619 if (icode == CODE_FOR_nothing)
5620 return NULL_RTX;
5622 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
5623 }
5625 /* Expand a compare-and-swap operation and store true into the result if
5626 the operation was successful and false otherwise. Return the result.
5627 Unlike other routines, TARGET is not optional. */
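/* In other words, the value stored into TARGET below is 1 when the
   compare-and-swap observed *MEM == OLD_VAL (and hence stored NEW_VAL)
   and 0 otherwise.  Illustrative note only.  */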
5629 rtx
5630 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5631 {
5632 enum machine_mode mode = GET_MODE (mem);
5633 enum insn_code icode;
5634 rtx subtarget, label0, label1;
5636 /* If the target supports a compare-and-swap pattern that simultaneously
5637 sets some flag for success, then use it. Otherwise use the regular
5638 compare-and-swap and follow that immediately with a compare insn. */
5639 icode = sync_compare_and_swap_cc[mode];
5640 switch (icode)
5641 {
5642 default:
5643 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5644 NULL_RTX, icode);
5645 if (subtarget != NULL_RTX)
5646 break;
5648 /* FALLTHRU */
5649 case CODE_FOR_nothing:
5650 icode = sync_compare_and_swap[mode];
5651 if (icode == CODE_FOR_nothing)
5652 return NULL_RTX;
5654 /* Ensure that if old_val == mem, we're not comparing
5655 against an old value. */
5656 if (MEM_P (old_val))
5657 old_val = force_reg (mode, old_val);
5659 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5660 NULL_RTX, icode);
5661 if (subtarget == NULL_RTX)
5662 return NULL_RTX;
5664 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5665 }
5667 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5668 setcc instruction from the beginning. We don't work too hard here,
5669 but it's nice to not be stupid about initial code gen either. */
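/* That is, with STORE_FLAG_VALUE == 1 the EQ setcc pattern deposits
   exactly the 1/0 result we want, keyed off the comparison emitted
   just above.  Illustrative note only.  */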
5670 if (STORE_FLAG_VALUE == 1)
5671 {
5672 icode = setcc_gen_code[EQ];
5673 if (icode != CODE_FOR_nothing)
5674 {
5675 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5676 rtx insn;
5678 subtarget = target;
5679 if (!insn_data[icode].operand[0].predicate (target, cmode))
5680 subtarget = gen_reg_rtx (cmode);
5682 insn = GEN_FCN (icode) (subtarget);
5683 if (insn)
5684 {
5685 emit_insn (insn);
5686 if (GET_MODE (target) != GET_MODE (subtarget))
5687 {
5688 convert_move (target, subtarget, 1);
5689 subtarget = target;
5690 }
5691 return subtarget;
5692 }
5693 }
5694 }
5696 /* Without an appropriate setcc instruction, use a set of branches to
5697 get 1 and 0 stored into target. Presumably if the target has a
5698 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5700 label0 = gen_label_rtx ();
5701 label1 = gen_label_rtx ();
5703 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
5704 emit_move_insn (target, const0_rtx);
5705 emit_jump_insn (gen_jump (label1));
5706 emit_barrier ();
5707 emit_label (label0);
5708 emit_move_insn (target, const1_rtx);
5709 emit_label (label1);
5711 return target;
5712 }
5714 /* This is a helper function for the other atomic operations. This function
5715 emits a loop that contains SEQ that iterates until a compare-and-swap
5716 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5717 a set of instructions that takes a value from OLD_REG as an input and
5718 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5719 set to the current contents of MEM. After SEQ, a compare-and-swap will
5720 attempt to update MEM with NEW_REG. The function returns true when the
5721 loop was generated successfully. */
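/* For instance, an atomic "*mem += val" can be built by allocating
   fresh registers t0 and t1, letting SEQ compute t1 = t0 + val, and
   calling this function with OLD_REG == t0 and NEW_REG == t1;
   expand_sync_operation below does exactly that.  */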
5723 static bool
5724 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5725 {
5726 enum machine_mode mode = GET_MODE (mem);
5727 enum insn_code icode;
5728 rtx label, cmp_reg, subtarget;
5730 /* The loop we want to generate looks like
5732 cmp_reg = mem;
5733 label:
5734 old_reg = cmp_reg;
5735 seq;
5736 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
5737 if (cmp_reg != old_reg)
5738 goto label;
5740 Note that we only do the plain load from memory once. Subsequent
5741 iterations use the value loaded by the compare-and-swap pattern. */
5743 label = gen_label_rtx ();
5744 cmp_reg = gen_reg_rtx (mode);
5746 emit_move_insn (cmp_reg, mem);
5747 emit_label (label);
5748 emit_move_insn (old_reg, cmp_reg);
5749 if (seq)
5750 emit_insn (seq);
5752 /* If the target supports a compare-and-swap pattern that simultaneously
5753 sets some flag for success, then use it. Otherwise use the regular
5754 compare-and-swap and follow that immediately with a compare insn. */
5755 icode = sync_compare_and_swap_cc[mode];
5756 switch (icode)
5757 {
5758 default:
5759 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5760 cmp_reg, icode);
5761 if (subtarget != NULL_RTX)
5762 {
5763 gcc_assert (subtarget == cmp_reg);
5764 break;
5765 }
5767 /* FALLTHRU */
5768 case CODE_FOR_nothing:
5769 icode = sync_compare_and_swap[mode];
5770 if (icode == CODE_FOR_nothing)
5771 return false;
5773 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5774 cmp_reg, icode);
5775 if (subtarget == NULL_RTX)
5776 return false;
5777 if (subtarget != cmp_reg)
5778 emit_move_insn (cmp_reg, subtarget);
5780 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
5781 }
5783 /* ??? Mark this jump predicted not taken? */
5784 emit_jump_insn (bcc_gen_fctn[NE] (label));
5786 return true;
5787 }
5789 /* This function generates the atomic operation MEM CODE= VAL. In this
5790 case, we do not care about any resulting value. Returns NULL if we
5791 cannot generate the operation. */
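/* E.g. CODE == PLUS yields an atomic "*mem += val" whose result is
   discarded.  As the MINUS case below shows, a missing sync_sub
   pattern can be emulated by negating VAL and using sync_add.  */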
5793 rtx
5794 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
5795 {
5796 enum machine_mode mode = GET_MODE (mem);
5797 enum insn_code icode;
5798 rtx insn;
5800 /* Look to see if the target supports the operation directly. */
5801 switch (code)
5802 {
5803 case PLUS:
5804 icode = sync_add_optab[mode];
5805 break;
5806 case IOR:
5807 icode = sync_ior_optab[mode];
5808 break;
5809 case XOR:
5810 icode = sync_xor_optab[mode];
5811 break;
5812 case AND:
5813 icode = sync_and_optab[mode];
5814 break;
5815 case NOT:
5816 icode = sync_nand_optab[mode];
5817 break;
5819 case MINUS:
5820 icode = sync_sub_optab[mode];
5821 if (icode == CODE_FOR_nothing)
5822 {
5823 icode = sync_add_optab[mode];
5824 if (icode != CODE_FOR_nothing)
5825 {
5826 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5827 code = PLUS;
5828 }
5829 }
5830 break;
5832 default:
5833 gcc_unreachable ();
5834 }
5836 /* Generate the direct operation, if present. */
5837 if (icode != CODE_FOR_nothing)
5838 {
5839 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5840 val = convert_modes (mode, GET_MODE (val), val, 1);
5841 if (!insn_data[icode].operand[1].predicate (val, mode))
5842 val = force_reg (mode, val);
5844 insn = GEN_FCN (icode) (mem, val);
5845 if (insn)
5846 {
5847 emit_insn (insn);
5848 return const0_rtx;
5849 }
5850 }
5852 /* Failing that, generate a compare-and-swap loop in which we perform the
5853 operation with normal arithmetic instructions. */
5854 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5855 {
5856 rtx t0 = gen_reg_rtx (mode), t1;
5858 start_sequence ();
5860 t1 = t0;
5861 if (code == NOT)
5862 {
5863 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
5864 code = AND;
5865 }
5866 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
5867 true, OPTAB_LIB_WIDEN);
5869 insn = get_insns ();
5870 end_sequence ();
5872 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
5873 return const0_rtx;
5874 }
5876 return NULL_RTX;
5877 }
5879 /* This function generates the atomic operation MEM CODE= VAL. In this
5880 case, we do care about the resulting value: if AFTER is true then
5881 return the value MEM holds after the operation, if AFTER is false
5882 then return the value MEM holds before the operation. TARGET is an
5883 optional place for the result value to be stored. */
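/* In terms of the __sync builtins, AFTER == false corresponds to the
   "fetch_and_op" forms (return the old value of MEM) and AFTER == true
   to the "op_and_fetch" forms (return the new value).  */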
5885 rtx
5886 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
5887 bool after, rtx target)
5888 {
5889 enum machine_mode mode = GET_MODE (mem);
5890 enum insn_code old_code, new_code, icode;
5891 bool compensate;
5892 rtx insn;
5894 /* Look to see if the target supports the operation directly. */
5895 switch (code)
5896 {
5897 case PLUS:
5898 old_code = sync_old_add_optab[mode];
5899 new_code = sync_new_add_optab[mode];
5900 break;
5901 case IOR:
5902 old_code = sync_old_ior_optab[mode];
5903 new_code = sync_new_ior_optab[mode];
5904 break;
5905 case XOR:
5906 old_code = sync_old_xor_optab[mode];
5907 new_code = sync_new_xor_optab[mode];
5908 break;
5909 case AND:
5910 old_code = sync_old_and_optab[mode];
5911 new_code = sync_new_and_optab[mode];
5912 break;
5913 case NOT:
5914 old_code = sync_old_nand_optab[mode];
5915 new_code = sync_new_nand_optab[mode];
5916 break;
5918 case MINUS:
5919 old_code = sync_old_sub_optab[mode];
5920 new_code = sync_new_sub_optab[mode];
5921 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
5922 {
5923 old_code = sync_old_add_optab[mode];
5924 new_code = sync_new_add_optab[mode];
5925 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
5926 {
5927 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5928 code = PLUS;
5929 }
5930 }
5931 break;
5933 default:
5934 gcc_unreachable ();
5935 }
5937 /* If the target supports the proper new/old operation, great. But if
5938 we only support the opposite old/new operation, check to see if we
5939 can compensate. When the old value is supported, we can always
5940 perform the operation again with normal arithmetic. When only the
5941 new value is supported, we can handle this only if the operation
5942 is reversible. */
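/* For example, if only the old-value pattern exists but the caller
   wants the value after an add, we emit the old-value insn and then
   redo "target = target + val" ourselves; if only the new-value
   pattern exists but the caller wants the value before the operation,
   it can be recovered only for reversible codes, e.g. by subtracting
   VAL again after an add.  */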
5943 compensate = false;
5944 if (after)
5945 {
5946 icode = new_code;
5947 if (icode == CODE_FOR_nothing)
5948 {
5949 icode = old_code;
5950 if (icode != CODE_FOR_nothing)
5951 compensate = true;
5952 }
5953 }
5954 else
5955 {
5956 icode = old_code;
5957 if (icode == CODE_FOR_nothing
5958 && (code == PLUS || code == MINUS || code == XOR))
5959 {
5960 icode = new_code;
5961 if (icode != CODE_FOR_nothing)
5962 compensate = true;
5963 }
5964 }
5966 /* If we found something supported, great. */
5967 if (icode != CODE_FOR_nothing)
5968 {
5969 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5970 target = gen_reg_rtx (mode);
5972 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5973 val = convert_modes (mode, GET_MODE (val), val, 1);
5974 if (!insn_data[icode].operand[2].predicate (val, mode))
5975 val = force_reg (mode, val);
5977 insn = GEN_FCN (icode) (target, mem, val);
5978 if (insn)
5979 {
5980 emit_insn (insn);
5982 /* If we need to compensate for using an operation with the
5983 wrong return value, do so now. */
5984 if (compensate)
5985 {
5986 if (!after)
5987 {
5988 if (code == PLUS)
5989 code = MINUS;
5990 else if (code == MINUS)
5991 code = PLUS;
5992 }
5994 if (code == NOT)
5995 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
5996 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
5997 true, OPTAB_LIB_WIDEN);
5998 }
6000 return target;
6001 }
6002 }
6004 /* Failing that, generate a compare-and-swap loop in which we perform the
6005 operation with normal arithmetic instructions. */
6006 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6007 {
6008 rtx t0 = gen_reg_rtx (mode), t1;
6010 if (!target || !register_operand (target, mode))
6011 target = gen_reg_rtx (mode);
6013 start_sequence ();
6015 if (!after)
6016 emit_move_insn (target, t0);
6017 t1 = t0;
6018 if (code == NOT)
6019 {
6020 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6021 code = AND;
6022 }
6023 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6024 true, OPTAB_LIB_WIDEN);
6025 if (after)
6026 emit_move_insn (target, t1);
6028 insn = get_insns ();
6029 end_sequence ();
6031 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6032 return target;
6033 }
6035 return NULL_RTX;
6036 }
6038 /* This function expands a test-and-set operation. Ideally we atomically
6039 store VAL in MEM and return the previous value in MEM. Some targets
6040 may not support this operation and only support VAL with the constant 1;
6041 in this case the return value will be 0/1, but the exact value
6042 stored in MEM is target defined. TARGET is an optional place to stick
6043 the return value. */
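/* Ignoring atomicity, the full form behaves roughly like
   "tmp = *mem; *mem = val; return tmp;"; the restricted form only
   guarantees a meaningful result when VAL is the constant 1.
   Illustrative sketch only.  */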
6045 rtx
6046 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6047 {
6048 enum machine_mode mode = GET_MODE (mem);
6049 enum insn_code icode;
6050 rtx insn;
6052 /* If the target supports the test-and-set directly, great. */
6053 icode = sync_lock_test_and_set[mode];
6054 if (icode != CODE_FOR_nothing)
6055 {
6056 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6057 target = gen_reg_rtx (mode);
6059 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6060 val = convert_modes (mode, GET_MODE (val), val, 1);
6061 if (!insn_data[icode].operand[2].predicate (val, mode))
6062 val = force_reg (mode, val);
6064 insn = GEN_FCN (icode) (target, mem, val);
6065 if (insn)
6066 {
6067 emit_insn (insn);
6068 return target;
6069 }
6070 }
6072 /* Otherwise, use a compare-and-swap loop for the exchange. */
6073 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6074 {
6075 if (!target || !register_operand (target, mode))
6076 target = gen_reg_rtx (mode);
6077 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6078 val = convert_modes (mode, GET_MODE (val), val, 1);
6079 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6080 return target;
6081 }
6083 return NULL_RTX;
6084 }
6086 #include "gt-optabs.h"