PR rtl-optimization/23837
[official-gcc.git] / gcc / optabs.c
1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "toplev.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
32 #include "rtl.h"
33 #include "tree.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "function.h"
37 #include "except.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "libfuncs.h"
41 #include "recog.h"
42 #include "reload.h"
43 #include "ggc.h"
44 #include "real.h"
45 #include "basic-block.h"
46 #include "target.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table[OTI_MAX];
59 rtx libfunc_table[LTI_MAX];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[COI_MAX];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
85 #endif
87 /* Indexed by the machine mode, gives the insn code for vector conditional
88 operation. */
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93 /* The insn generating function cannot take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
100 int);
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
105 int *);
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static enum rtx_code get_rtx_code (enum tree_code, bool);
126 static rtx vector_compare_rtx (tree, bool, enum insn_code);
128 #ifndef HAVE_conditional_trap
129 #define HAVE_conditional_trap 0
130 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
131 #endif
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
135 operation).
137 If the last insn does not set TARGET, don't do anything, but return 1.
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
143 static int
144 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
146 rtx last_insn, insn, set;
147 rtx note;
149 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
151 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
152 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
153 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
154 && GET_RTX_CLASS (code) != RTX_COMPARE
155 && GET_RTX_CLASS (code) != RTX_UNARY)
156 return 1;
158 if (GET_CODE (target) == ZERO_EXTRACT)
159 return 1;
161 for (last_insn = insns;
162 NEXT_INSN (last_insn) != NULL_RTX;
163 last_insn = NEXT_INSN (last_insn))
166 set = single_set (last_insn);
167 if (set == NULL_RTX)
168 return 1;
170 if (! rtx_equal_p (SET_DEST (set), target)
171 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
172 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
173 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
174 return 1;
176 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
177 besides the last insn. */
178 if (reg_overlap_mentioned_p (target, op0)
179 || (op1 && reg_overlap_mentioned_p (target, op1)))
181 insn = PREV_INSN (last_insn);
182 while (insn != NULL_RTX)
184 if (reg_set_p (target, insn))
185 return 0;
187 insn = PREV_INSN (insn);
191 if (GET_RTX_CLASS (code) == RTX_UNARY)
192 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
193 else
194 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
196 set_unique_reg_note (last_insn, REG_EQUAL, note);
198 return 1;
201 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
202 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
203 not actually do a sign-extend or zero-extend, but can leave the
204 higher-order bits of the result rtx undefined, for example, in the case
205 of logical operations, but not right shifts. */
207 static rtx
208 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
209 int unsignedp, int no_extend)
211 rtx result;
213 /* If we don't have to extend and this is a constant, return it. */
214 if (no_extend && GET_MODE (op) == VOIDmode)
215 return op;
217 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
218 extend since it will be more efficient to do so unless the signedness of
219 a promoted object differs from our extension. */
220 if (! no_extend
221 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
222 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
223 return convert_modes (mode, oldmode, op, unsignedp);
225 /* If MODE is no wider than a single word, we return a paradoxical
226 SUBREG. */
227 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
228 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
230 /* Otherwise, get an object of MODE, clobber it, and set the low-order
231 part to OP. */
233 result = gen_reg_rtx (mode);
234 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
235 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
236 return result;
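/* Editorial illustration (not part of optabs.c): why NO_EXTEND is safe for
   logical operations.  A minimal sketch assuming 8-bit narrow values widened
   to a 32-bit "unsigned int"; the arbitrary bits placed in the high part of
   the widened operands never reach the low 8 bits of the result, so a
   widened operation followed by truncation matches the narrow operation.  */

static unsigned char
widen_or_example (unsigned char a, unsigned char b, unsigned int garbage)
{
  /* Widen without a real extension: the high bits are arbitrary.  */
  unsigned int wide_a = (garbage << 8) | a;
  unsigned int wide_b = (~garbage << 8) | b;

  /* Operate in the wide mode, then truncate back to the narrow mode.
     For IOR (and likewise AND, XOR, PLUS, MINUS, MULT, ASHIFT) the low
     8 bits equal (unsigned char) (a | b).  */
  return (unsigned char) (wide_a | wide_b);
}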
239 /* Return the optab used for computing the operation given by
240 the tree code, CODE. This function is not always usable (for
241 example, it cannot give complete results for multiplication
242 or division) but probably ought to be relied on more widely
243 throughout the expander. */
244 optab
245 optab_for_tree_code (enum tree_code code, tree type)
247 bool trapv;
248 switch (code)
250 case BIT_AND_EXPR:
251 return and_optab;
253 case BIT_IOR_EXPR:
254 return ior_optab;
256 case BIT_NOT_EXPR:
257 return one_cmpl_optab;
259 case BIT_XOR_EXPR:
260 return xor_optab;
262 case TRUNC_MOD_EXPR:
263 case CEIL_MOD_EXPR:
264 case FLOOR_MOD_EXPR:
265 case ROUND_MOD_EXPR:
266 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
268 case RDIV_EXPR:
269 case TRUNC_DIV_EXPR:
270 case CEIL_DIV_EXPR:
271 case FLOOR_DIV_EXPR:
272 case ROUND_DIV_EXPR:
273 case EXACT_DIV_EXPR:
274 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
276 case LSHIFT_EXPR:
277 return ashl_optab;
279 case RSHIFT_EXPR:
280 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
282 case LROTATE_EXPR:
283 return rotl_optab;
285 case RROTATE_EXPR:
286 return rotr_optab;
288 case MAX_EXPR:
289 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
291 case MIN_EXPR:
292 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
294 case REALIGN_LOAD_EXPR:
295 return vec_realign_load_optab;
297 case REDUC_MAX_EXPR:
298 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
300 case REDUC_MIN_EXPR:
301 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
303 case REDUC_PLUS_EXPR:
304 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
306 case VEC_LSHIFT_EXPR:
307 return vec_shl_optab;
309 case VEC_RSHIFT_EXPR:
310 return vec_shr_optab;
312 default:
313 break;
316 trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
317 switch (code)
319 case PLUS_EXPR:
320 return trapv ? addv_optab : add_optab;
322 case MINUS_EXPR:
323 return trapv ? subv_optab : sub_optab;
325 case MULT_EXPR:
326 return trapv ? smulv_optab : smul_optab;
328 case NEGATE_EXPR:
329 return trapv ? negv_optab : neg_optab;
331 case ABS_EXPR:
332 return trapv ? absv_optab : abs_optab;
334 default:
335 return NULL;
340 /* Generate code to perform an operation specified by TERNARY_OPTAB
341 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
343 UNSIGNEDP is for the case where we have to widen the operands
344 to perform the operation. It says to use zero-extension.
346 If TARGET is nonzero, the value
347 is generated there, if it is convenient to do so.
348 In all cases an rtx is returned for the locus of the value;
349 this may or may not be TARGET. */
351 rtx
352 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
353 rtx op1, rtx op2, rtx target, int unsignedp)
355 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
356 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
357 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
358 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
359 rtx temp;
360 rtx pat;
361 rtx xop0 = op0, xop1 = op1, xop2 = op2;
363 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
364 != CODE_FOR_nothing);
366 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
367 temp = gen_reg_rtx (mode);
368 else
369 temp = target;
371 /* In case the insn wants input operands in modes different from
372 those of the actual operands, convert the operands. It would
373 seem that we don't need to convert CONST_INTs, but we do, so
374 that they're properly zero-extended, sign-extended or truncated
375 for their mode. */
377 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
378 xop0 = convert_modes (mode0,
379 GET_MODE (op0) != VOIDmode
380 ? GET_MODE (op0)
381 : mode,
382 xop0, unsignedp);
384 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
385 xop1 = convert_modes (mode1,
386 GET_MODE (op1) != VOIDmode
387 ? GET_MODE (op1)
388 : mode,
389 xop1, unsignedp);
391 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
392 xop2 = convert_modes (mode2,
393 GET_MODE (op2) != VOIDmode
394 ? GET_MODE (op2)
395 : mode,
396 xop2, unsignedp);
398 /* Now, if insn's predicates don't allow our operands, put them into
399 pseudo regs. */
401 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
402 && mode0 != VOIDmode)
403 xop0 = copy_to_mode_reg (mode0, xop0);
405 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
406 && mode1 != VOIDmode)
407 xop1 = copy_to_mode_reg (mode1, xop1);
409 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
410 && mode2 != VOIDmode)
411 xop2 = copy_to_mode_reg (mode2, xop2);
413 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
415 emit_insn (pat);
416 return temp;
420 /* Like expand_binop, but return a constant rtx if the result can be
421 calculated at compile time. The arguments and return value are
422 otherwise the same as for expand_binop. */
424 static rtx
425 simplify_expand_binop (enum machine_mode mode, optab binoptab,
426 rtx op0, rtx op1, rtx target, int unsignedp,
427 enum optab_methods methods)
429 if (CONSTANT_P (op0) && CONSTANT_P (op1))
430 return simplify_gen_binary (binoptab->code, mode, op0, op1);
431 else
432 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
435 /* Like simplify_expand_binop, but always put the result in TARGET.
436 Return true if the expansion succeeded. */
438 bool
439 force_expand_binop (enum machine_mode mode, optab binoptab,
440 rtx op0, rtx op1, rtx target, int unsignedp,
441 enum optab_methods methods)
443 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
444 target, unsignedp, methods);
445 if (x == 0)
446 return false;
447 if (x != target)
448 emit_move_insn (target, x);
449 return true;
452 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
454 rtx
455 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
457 enum insn_code icode;
458 rtx rtx_op1, rtx_op2;
459 enum machine_mode mode1;
460 enum machine_mode mode2;
461 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
462 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
463 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
464 optab shift_optab;
465 rtx pat;
467 switch (TREE_CODE (vec_shift_expr))
469 case VEC_RSHIFT_EXPR:
470 shift_optab = vec_shr_optab;
471 break;
472 case VEC_LSHIFT_EXPR:
473 shift_optab = vec_shl_optab;
474 break;
475 default:
476 gcc_unreachable ();
479 icode = (int) shift_optab->handlers[(int) mode].insn_code;
480 gcc_assert (icode != CODE_FOR_nothing);
482 mode1 = insn_data[icode].operand[1].mode;
483 mode2 = insn_data[icode].operand[2].mode;
485 rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
486 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
487 && mode1 != VOIDmode)
488 rtx_op1 = force_reg (mode1, rtx_op1);
490 rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
491 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
492 && mode2 != VOIDmode)
493 rtx_op2 = force_reg (mode2, rtx_op2);
495 if (!target
496 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
497 target = gen_reg_rtx (mode);
499 /* Emit instruction */
500 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
501 gcc_assert (pat);
502 emit_insn (pat);
504 return target;
507 /* This subroutine of expand_doubleword_shift handles the cases in which
508 the effective shift value is >= BITS_PER_WORD. The arguments and return
509 value are the same as for the parent routine, except that SUPERWORD_OP1
510 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
511 INTO_TARGET may be null if the caller has decided to calculate it. */
513 static bool
514 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
515 rtx outof_target, rtx into_target,
516 int unsignedp, enum optab_methods methods)
518 if (into_target != 0)
519 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
520 into_target, unsignedp, methods))
521 return false;
523 if (outof_target != 0)
525 /* For a signed right shift, we must fill OUTOF_TARGET with copies
526 of the sign bit, otherwise we must fill it with zeros. */
527 if (binoptab != ashr_optab)
528 emit_move_insn (outof_target, CONST0_RTX (word_mode));
529 else
530 if (!force_expand_binop (word_mode, binoptab,
531 outof_input, GEN_INT (BITS_PER_WORD - 1),
532 outof_target, unsignedp, methods))
533 return false;
535 return true;
538 /* This subroutine of expand_doubleword_shift handles the cases in which
539 the effective shift value is < BITS_PER_WORD. The arguments and return
540 value are the same as for the parent routine. */
542 static bool
543 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
544 rtx outof_input, rtx into_input, rtx op1,
545 rtx outof_target, rtx into_target,
546 int unsignedp, enum optab_methods methods,
547 unsigned HOST_WIDE_INT shift_mask)
549 optab reverse_unsigned_shift, unsigned_shift;
550 rtx tmp, carries;
552 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
553 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
555 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
556 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
557 the opposite direction to BINOPTAB. */
558 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
560 carries = outof_input;
561 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
562 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
563 0, true, methods);
565 else
567 /* We must avoid shifting by BITS_PER_WORD bits since that is either
568 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
569 has unknown behavior. Do a single shift first, then shift by the
570 remainder. It's OK to use ~OP1 as the remainder if shift counts
571 are truncated to the mode size. */
572 carries = expand_binop (word_mode, reverse_unsigned_shift,
573 outof_input, const1_rtx, 0, unsignedp, methods);
574 if (shift_mask == BITS_PER_WORD - 1)
576 tmp = immed_double_const (-1, -1, op1_mode);
577 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
578 0, true, methods);
580 else
582 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
583 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
584 0, true, methods);
587 if (tmp == 0 || carries == 0)
588 return false;
589 carries = expand_binop (word_mode, reverse_unsigned_shift,
590 carries, tmp, 0, unsignedp, methods);
591 if (carries == 0)
592 return false;
594 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
595 so the result can go directly into INTO_TARGET if convenient. */
596 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
597 into_target, unsignedp, methods);
598 if (tmp == 0)
599 return false;
601 /* Now OR in the bits carried over from OUTOF_INPUT. */
602 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
603 into_target, unsignedp, methods))
604 return false;
606 /* Use a standard word_mode shift for the out-of half. */
607 if (outof_target != 0)
608 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
609 outof_target, unsignedp, methods))
610 return false;
612 return true;
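/* Editorial illustration (not part of optabs.c): the subword case of a
   doubleword shift, written for a 64-bit value held in two 32-bit words
   (the word size is an assumption of this sketch).  It mirrors the
   variable-count case above: the carried bits are produced by a shift of 1
   followed by a shift of (31 - n), which never shifts by the full word
   width even when N is 0.  */

static void
doubleword_shl_subword_example (unsigned int low_in, unsigned int high_in,
				unsigned int n,	/* 0 <= n < 32 */
				unsigned int *low_out, unsigned int *high_out)
{
  /* Bits carried from the low word into the high word.  Computing
     (low_in >> (32 - n)) directly would shift by 32 when N is 0, which is
     undefined in C just as it may be for the machine's shift insn.  */
  unsigned int carries = (low_in >> 1) >> (31 - n);

  *high_out = (high_in << n) | carries;
  *low_out = low_in << n;
}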
616 #ifdef HAVE_conditional_move
617 /* Try implementing expand_doubleword_shift using conditional moves.
618 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
619 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
620 are the shift counts to use in the former and latter case. All other
621 arguments are the same as the parent routine. */
623 static bool
624 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
625 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
626 rtx outof_input, rtx into_input,
627 rtx subword_op1, rtx superword_op1,
628 rtx outof_target, rtx into_target,
629 int unsignedp, enum optab_methods methods,
630 unsigned HOST_WIDE_INT shift_mask)
632 rtx outof_superword, into_superword;
634 /* Put the superword version of the output into OUTOF_SUPERWORD and
635 INTO_SUPERWORD. */
636 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
637 if (outof_target != 0 && subword_op1 == superword_op1)
639 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
640 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
641 into_superword = outof_target;
642 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
643 outof_superword, 0, unsignedp, methods))
644 return false;
646 else
648 into_superword = gen_reg_rtx (word_mode);
649 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
650 outof_superword, into_superword,
651 unsignedp, methods))
652 return false;
655 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
656 if (!expand_subword_shift (op1_mode, binoptab,
657 outof_input, into_input, subword_op1,
658 outof_target, into_target,
659 unsignedp, methods, shift_mask))
660 return false;
662 /* Select between them. Do the INTO half first because INTO_SUPERWORD
663 might be the current value of OUTOF_TARGET. */
664 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
665 into_target, into_superword, word_mode, false))
666 return false;
668 if (outof_target != 0)
669 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
670 outof_target, outof_superword,
671 word_mode, false))
672 return false;
674 return true;
676 #endif
678 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
679 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
680 input operand; the shift moves bits in the direction OUTOF_INPUT->
681 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
682 of the target. OP1 is the shift count and OP1_MODE is its mode.
683 If OP1 is constant, it will have been truncated as appropriate
684 and is known to be nonzero.
686 If SHIFT_MASK is zero, the result of word shifts is undefined when the
687 shift count is outside the range [0, BITS_PER_WORD). This routine must
688 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
690 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
691 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
692 fill with zeros or sign bits as appropriate.
694 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
695 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
696 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
697 In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
698 are undefined.
700 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
701 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
702 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
703 function wants to calculate it itself.
705 Return true if the shift could be successfully synthesized. */
707 static bool
708 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
709 rtx outof_input, rtx into_input, rtx op1,
710 rtx outof_target, rtx into_target,
711 int unsignedp, enum optab_methods methods,
712 unsigned HOST_WIDE_INT shift_mask)
714 rtx superword_op1, tmp, cmp1, cmp2;
715 rtx subword_label, done_label;
716 enum rtx_code cmp_code;
718 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
719 fill the result with sign or zero bits as appropriate. If so, the value
720 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
721 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
722 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
724 This isn't worthwhile for constant shifts since the optimizers will
725 cope better with in-range shift counts. */
726 if (shift_mask >= BITS_PER_WORD
727 && outof_target != 0
728 && !CONSTANT_P (op1))
730 if (!expand_doubleword_shift (op1_mode, binoptab,
731 outof_input, into_input, op1,
732 0, into_target,
733 unsignedp, methods, shift_mask))
734 return false;
735 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
736 outof_target, unsignedp, methods))
737 return false;
738 return true;
741 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
742 is true when the effective shift value is less than BITS_PER_WORD.
743 Set SUPERWORD_OP1 to the shift count that should be used to shift
744 OUTOF_INPUT into INTO_TARGET when the condition is false. */
745 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
746 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
748 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
749 is a subword shift count. */
750 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
751 0, true, methods);
752 cmp2 = CONST0_RTX (op1_mode);
753 cmp_code = EQ;
754 superword_op1 = op1;
756 else
758 /* Set CMP1 to OP1 - BITS_PER_WORD. */
759 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
760 0, true, methods);
761 cmp2 = CONST0_RTX (op1_mode);
762 cmp_code = LT;
763 superword_op1 = cmp1;
765 if (cmp1 == 0)
766 return false;
768 /* If we can compute the condition at compile time, pick the
769 appropriate subroutine. */
770 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
771 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
773 if (tmp == const0_rtx)
774 return expand_superword_shift (binoptab, outof_input, superword_op1,
775 outof_target, into_target,
776 unsignedp, methods);
777 else
778 return expand_subword_shift (op1_mode, binoptab,
779 outof_input, into_input, op1,
780 outof_target, into_target,
781 unsignedp, methods, shift_mask);
784 #ifdef HAVE_conditional_move
785 /* Try using conditional moves to generate straight-line code. */
787 rtx start = get_last_insn ();
788 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
789 cmp_code, cmp1, cmp2,
790 outof_input, into_input,
791 op1, superword_op1,
792 outof_target, into_target,
793 unsignedp, methods, shift_mask))
794 return true;
795 delete_insns_since (start);
797 #endif
799 /* As a last resort, use branches to select the correct alternative. */
800 subword_label = gen_label_rtx ();
801 done_label = gen_label_rtx ();
803 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
804 0, 0, subword_label);
806 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
807 outof_target, into_target,
808 unsignedp, methods))
809 return false;
811 emit_jump_insn (gen_jump (done_label));
812 emit_barrier ();
813 emit_label (subword_label);
815 if (!expand_subword_shift (op1_mode, binoptab,
816 outof_input, into_input, op1,
817 outof_target, into_target,
818 unsignedp, methods, shift_mask))
819 return false;
821 emit_label (done_label);
822 return true;
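/* Editorial illustration (not part of optabs.c): the overall shape of the
   synthesized doubleword shift, here an arithmetic right shift of a 64-bit
   value kept in two 32-bit words (both sizes are assumptions of this
   sketch, as is treating ">>" on a negative "int" as an arithmetic shift,
   which is implementation-defined in C).  The real expansion emits RTL and
   chooses between conditional moves and branches; this only shows the
   superword/subword split selected above.  */

static void
doubleword_ashr_example (unsigned int low_in, int high_in, unsigned int n,
			 unsigned int *low_out, int *high_out)
{
  if (n == 0)
    {
      *low_out = low_in;
      *high_out = high_in;
    }
  else if (n >= 32)
    {
      /* Superword case: shift the high input into the low half of the
	 result and fill the high half with copies of the sign bit.  */
      *low_out = (unsigned int) (high_in >> (n - 32));
      *high_out = high_in >> 31;
    }
  else
    {
      /* Subword case: bits carried out of the high word become the top
	 bits of the low result.  */
      *low_out = (low_in >> n) | ((unsigned int) high_in << (32 - n));
      *high_out = high_in >> n;
    }
}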
825 /* Subroutine of expand_binop. Perform a double word multiplication of
826 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
827 as the target's word_mode. This function returns NULL_RTX if anything
828 goes wrong, in which case it may have already emitted instructions
829 which need to be deleted.
831 If we want to multiply two two-word values and have normal and widening
832 multiplies of single-word values, we can do this with three smaller
833 multiplications. Note that we do not make a REG_NO_CONFLICT block here
834 because we are not operating on one word at a time.
836 The multiplication proceeds as follows:
837                                          _______________________
838                                         [__op0_high_|__op0_low__]
839                                          _______________________
840                                     *   [__op1_high_|__op1_low__]
841                   _______________________________________________
842                                          _______________________
843     (1)                                 [__op0_low__*__op1_low__]
844                              _______________________
845     (2a)                    [__op0_low__*__op1_high_]
846                              _______________________
847     (2b)                    [__op0_high_*__op1_low__]
848                _______________________
849     (3)       [__op0_high_*__op1_high_]
852 This gives a 4-word result. Since we are only interested in the
853 lower 2 words, partial result (3) and the upper words of (2a) and
854 (2b) don't need to be calculated. Hence (2a) and (2b) can be
855 calculated using non-widening multiplication.
857 (1), however, needs to be calculated with an unsigned widening
858 multiplication. If this operation is not directly supported we
859 try using a signed widening multiplication and adjust the result.
860 This adjustment works as follows:
862 If both operands are positive then no adjustment is needed.
864 If the operands have different signs, for example op0_low < 0 and
865 op1_low >= 0, the instruction treats the most significant bit of
866 op0_low as a sign bit instead of a bit with significance
867 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
868 with 2**BITS_PER_WORD - op0_low, and two's complements the
869 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
870 the result.
872 Similarly, if both operands are negative, we need to add
873 (op0_low + op1_low) * 2**BITS_PER_WORD.
875 We use a trick to adjust quickly. We logically shift op0_low right
876 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
877 op0_high (op1_high) before it is used to calculate 2b (2a). If no
878 logical shift exists, we do an arithmetic right shift and subtract
879 the 0 or -1. */
881 static rtx
882 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
883 bool umulp, enum optab_methods methods)
885 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
886 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
887 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
888 rtx product, adjust, product_high, temp;
890 rtx op0_high = operand_subword_force (op0, high, mode);
891 rtx op0_low = operand_subword_force (op0, low, mode);
892 rtx op1_high = operand_subword_force (op1, high, mode);
893 rtx op1_low = operand_subword_force (op1, low, mode);
895 /* If we're using an unsigned multiply to directly compute the product
896 of the low-order words of the operands and perform any required
897 adjustments of the operands, we begin by trying two more multiplications
898 and then computing the appropriate sum.
900 We have checked above that the required addition is provided.
901 Full-word addition will normally always succeed, especially if
902 it is provided at all, so we don't worry about its failure. The
903 multiplication may well fail, however, so we do handle that. */
905 if (!umulp)
907 /* ??? This could be done with emit_store_flag where available. */
908 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
909 NULL_RTX, 1, methods);
910 if (temp)
911 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
912 NULL_RTX, 0, OPTAB_DIRECT);
913 else
915 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
916 NULL_RTX, 0, methods);
917 if (!temp)
918 return NULL_RTX;
919 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
920 NULL_RTX, 0, OPTAB_DIRECT);
923 if (!op0_high)
924 return NULL_RTX;
927 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
928 NULL_RTX, 0, OPTAB_DIRECT);
929 if (!adjust)
930 return NULL_RTX;
932 /* OP0_HIGH should now be dead. */
934 if (!umulp)
936 /* ??? This could be done with emit_store_flag where available. */
937 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
938 NULL_RTX, 1, methods);
939 if (temp)
940 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
941 NULL_RTX, 0, OPTAB_DIRECT);
942 else
944 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
945 NULL_RTX, 0, methods);
946 if (!temp)
947 return NULL_RTX;
948 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
949 NULL_RTX, 0, OPTAB_DIRECT);
952 if (!op1_high)
953 return NULL_RTX;
956 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
957 NULL_RTX, 0, OPTAB_DIRECT);
958 if (!temp)
959 return NULL_RTX;
961 /* OP1_HIGH should now be dead. */
963 adjust = expand_binop (word_mode, add_optab, adjust, temp,
964 adjust, 0, OPTAB_DIRECT);
966 if (target && !REG_P (target))
967 target = NULL_RTX;
969 if (umulp)
970 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
971 target, 1, OPTAB_DIRECT);
972 else
973 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
974 target, 1, OPTAB_DIRECT);
976 if (!product)
977 return NULL_RTX;
979 product_high = operand_subword (product, high, 1, mode);
980 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
981 REG_P (product_high) ? product_high : adjust,
982 0, OPTAB_DIRECT);
983 emit_move_insn (product_high, adjust);
984 return product;
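/* Editorial illustration (not part of optabs.c): the three-multiplication
   scheme pictured above, written for the low 64 bits of a product of two
   64-bit values held as 32-bit halves (the word size and the use of
   "unsigned long long" for the widening multiply are assumptions of this
   sketch).  Partial product (3) and the high halves of (2a) and (2b) never
   reach the low two words, so only (1) needs a widening multiply.  */

static unsigned long long
doubleword_mult_example (unsigned int op0_low, unsigned int op0_high,
			 unsigned int op1_low, unsigned int op1_high)
{
  /* (1): unsigned widening multiply of the low words.  */
  unsigned long long product = (unsigned long long) op0_low * op1_low;

  /* (2a) and (2b): non-widening multiplies whose results only matter
     modulo 2**32; their sum is added into the high word of the product.  */
  unsigned int adjust = op0_high * op1_low + op0_low * op1_high;

  return product + ((unsigned long long) adjust << 32);
}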
987 /* Wrapper around expand_binop which takes an rtx code to specify
988 the operation to perform, not an optab pointer. All other
989 arguments are the same. */
990 rtx
991 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
992 rtx op1, rtx target, int unsignedp,
993 enum optab_methods methods)
995 optab binop = code_to_optab[(int) code];
996 gcc_assert (binop);
998 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1001 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1002 binop. Order them according to commutative_operand_precedence and, if
1003 possible, try to put TARGET or a pseudo first. */
1004 static bool
1005 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1007 int op0_prec = commutative_operand_precedence (op0);
1008 int op1_prec = commutative_operand_precedence (op1);
1010 if (op0_prec < op1_prec)
1011 return true;
1013 if (op0_prec > op1_prec)
1014 return false;
1016 /* With equal precedence, both orders are ok, but it is better if the
1017 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1018 if (target == 0 || REG_P (target))
1019 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1020 else
1021 return rtx_equal_p (op1, target);
1025 /* Generate code to perform an operation specified by BINOPTAB
1026 on operands OP0 and OP1, with result having machine-mode MODE.
1028 UNSIGNEDP is for the case where we have to widen the operands
1029 to perform the operation. It says to use zero-extension.
1031 If TARGET is nonzero, the value
1032 is generated there, if it is convenient to do so.
1033 In all cases an rtx is returned for the locus of the value;
1034 this may or may not be TARGET. */
1036 rtx
1037 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1038 rtx target, int unsignedp, enum optab_methods methods)
1040 enum optab_methods next_methods
1041 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1042 ? OPTAB_WIDEN : methods);
1043 enum mode_class class;
1044 enum machine_mode wider_mode;
1045 rtx temp;
1046 int commutative_op = 0;
1047 int shift_op = (binoptab->code == ASHIFT
1048 || binoptab->code == ASHIFTRT
1049 || binoptab->code == LSHIFTRT
1050 || binoptab->code == ROTATE
1051 || binoptab->code == ROTATERT);
1052 rtx entry_last = get_last_insn ();
1053 rtx last;
1054 bool first_pass_p = true;
1056 class = GET_MODE_CLASS (mode);
1058 /* If subtracting an integer constant, convert this into an addition of
1059 the negated constant. */
1061 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1063 op1 = negate_rtx (mode, op1);
1064 binoptab = add_optab;
1067 /* If we are inside an appropriately-short loop and we are optimizing,
1068 force expensive constants into a register. */
1069 if (CONSTANT_P (op0) && optimize
1070 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1072 if (GET_MODE (op0) != VOIDmode)
1073 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1074 op0 = force_reg (mode, op0);
1077 if (CONSTANT_P (op1) && optimize
1078 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1080 if (GET_MODE (op1) != VOIDmode)
1081 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1082 op1 = force_reg (mode, op1);
1085 /* Record where to delete back to if we backtrack. */
1086 last = get_last_insn ();
1088 /* If operation is commutative,
1089 try to make the first operand a register.
1090 Even better, try to make it the same as the target.
1091 Also try to make the last operand a constant. */
1092 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1093 || binoptab == smul_widen_optab
1094 || binoptab == umul_widen_optab
1095 || binoptab == smul_highpart_optab
1096 || binoptab == umul_highpart_optab)
1098 commutative_op = 1;
1100 if (swap_commutative_operands_with_target (target, op0, op1))
1102 temp = op1;
1103 op1 = op0;
1104 op0 = temp;
1108 retry:
1110 /* If we can do it with a three-operand insn, do so. */
1112 if (methods != OPTAB_MUST_WIDEN
1113 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1115 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1116 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1117 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1118 rtx pat;
1119 rtx xop0 = op0, xop1 = op1;
1121 if (target)
1122 temp = target;
1123 else
1124 temp = gen_reg_rtx (mode);
1126 /* If it is a commutative operator and the modes would match
1127 if we would swap the operands, we can save the conversions. */
1128 if (commutative_op)
1130 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1131 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1133 rtx tmp;
1135 tmp = op0; op0 = op1; op1 = tmp;
1136 tmp = xop0; xop0 = xop1; xop1 = tmp;
1140 /* In case the insn wants input operands in modes different from
1141 those of the actual operands, convert the operands. It would
1142 seem that we don't need to convert CONST_INTs, but we do, so
1143 that they're properly zero-extended, sign-extended or truncated
1144 for their mode. */
1146 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1147 xop0 = convert_modes (mode0,
1148 GET_MODE (op0) != VOIDmode
1149 ? GET_MODE (op0)
1150 : mode,
1151 xop0, unsignedp);
1153 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1154 xop1 = convert_modes (mode1,
1155 GET_MODE (op1) != VOIDmode
1156 ? GET_MODE (op1)
1157 : mode,
1158 xop1, unsignedp);
1160 /* Now, if insn's predicates don't allow our operands, put them into
1161 pseudo regs. */
1163 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1164 && mode0 != VOIDmode)
1165 xop0 = copy_to_mode_reg (mode0, xop0);
1167 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1168 && mode1 != VOIDmode)
1169 xop1 = copy_to_mode_reg (mode1, xop1);
1171 if (!insn_data[icode].operand[0].predicate (temp, mode))
1172 temp = gen_reg_rtx (mode);
1174 pat = GEN_FCN (icode) (temp, xop0, xop1);
1175 if (pat)
1177 /* If PAT is composed of more than one insn, try to add an appropriate
1178 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1179 operand, call ourselves again, this time without a target. */
1180 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1181 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1183 delete_insns_since (last);
1184 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1185 unsignedp, methods);
1188 emit_insn (pat);
1189 return temp;
1191 else
1192 delete_insns_since (last);
1195 /* If we were trying to rotate by a constant value, and that didn't
1196 work, try rotating the other direction before falling back to
1197 shifts and bitwise-or. */
1198 if (first_pass_p
1199 && (binoptab == rotl_optab || binoptab == rotr_optab)
1200 && class == MODE_INT
1201 && GET_CODE (op1) == CONST_INT
1202 && INTVAL (op1) > 0
1203 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1205 first_pass_p = false;
1206 op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
1207 binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
1208 goto retry;
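  /* Editorial illustration (not part of optabs.c): the identity behind the
     retry above, assuming 32-bit words and example values chosen for the
     sketch.  Rotating left by COUNT equals rotating right by
     BITS_PER_WORD - COUNT, so when only one rotate direction is available
     the constant count can simply be flipped.  */
  {
    unsigned int x = 0x12345678u;
    unsigned int left8 = (x << 8) | (x >> 24);	  /* rotate left by 8 */
    unsigned int right24 = (x >> 24) | (x << 8);  /* rotate right by 24 */
    /* left8 == right24 == 0x34567812: rotl (x, 8) == rotr (x, 32 - 8).  */
  }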
1211 /* If this is a multiply, see if we can do a widening operation that
1212 takes operands of this mode and makes a wider mode. */
1214 if (binoptab == smul_optab
1215 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1216 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1217 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1218 != CODE_FOR_nothing))
1220 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1221 unsignedp ? umul_widen_optab : smul_widen_optab,
1222 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1224 if (temp != 0)
1226 if (GET_MODE_CLASS (mode) == MODE_INT
1227 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1228 GET_MODE_BITSIZE (GET_MODE (temp))))
1229 return gen_lowpart (mode, temp);
1230 else
1231 return convert_to_mode (mode, temp, unsignedp);
1235 /* Look for a wider mode of the same class for which we think we
1236 can open-code the operation. Check for a widening multiply at the
1237 wider mode as well. */
1239 if (CLASS_HAS_WIDER_MODES_P (class)
1240 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1241 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1242 wider_mode != VOIDmode;
1243 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1245 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1246 || (binoptab == smul_optab
1247 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1248 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1249 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1250 != CODE_FOR_nothing)))
1252 rtx xop0 = op0, xop1 = op1;
1253 int no_extend = 0;
1255 /* For certain integer operations, we need not actually extend
1256 the narrow operands, as long as we will truncate
1257 the results to the same narrowness. */
1259 if ((binoptab == ior_optab || binoptab == and_optab
1260 || binoptab == xor_optab
1261 || binoptab == add_optab || binoptab == sub_optab
1262 || binoptab == smul_optab || binoptab == ashl_optab)
1263 && class == MODE_INT)
1264 no_extend = 1;
1266 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1268 /* The second operand of a shift must always be extended. */
1269 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1270 no_extend && binoptab != ashl_optab);
1272 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1273 unsignedp, OPTAB_DIRECT);
1274 if (temp)
1276 if (class != MODE_INT
1277 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1278 GET_MODE_BITSIZE (wider_mode)))
1280 if (target == 0)
1281 target = gen_reg_rtx (mode);
1282 convert_move (target, temp, 0);
1283 return target;
1285 else
1286 return gen_lowpart (mode, temp);
1288 else
1289 delete_insns_since (last);
1293 /* These can be done a word at a time. */
1294 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1295 && class == MODE_INT
1296 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1297 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1299 int i;
1300 rtx insns;
1301 rtx equiv_value;
1303 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1304 won't be accurate, so use a new target. */
1305 if (target == 0 || target == op0 || target == op1)
1306 target = gen_reg_rtx (mode);
1308 start_sequence ();
1310 /* Do the actual arithmetic. */
1311 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1313 rtx target_piece = operand_subword (target, i, 1, mode);
1314 rtx x = expand_binop (word_mode, binoptab,
1315 operand_subword_force (op0, i, mode),
1316 operand_subword_force (op1, i, mode),
1317 target_piece, unsignedp, next_methods);
1319 if (x == 0)
1320 break;
1322 if (target_piece != x)
1323 emit_move_insn (target_piece, x);
1326 insns = get_insns ();
1327 end_sequence ();
1329 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1331 if (binoptab->code != UNKNOWN)
1332 equiv_value
1333 = gen_rtx_fmt_ee (binoptab->code, mode,
1334 copy_rtx (op0), copy_rtx (op1));
1335 else
1336 equiv_value = 0;
1338 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1339 return target;
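  /* Editorial illustration (not part of optabs.c): bitwise operations are
     independent per word, which is all the loop above relies on.  The
     sketch assumes a doubleword value held as two 32-bit words, with
     example values chosen for the illustration.  */
  {
    unsigned int a_low = 0x0000ffffu, a_high = 0x12340000u;
    unsigned int b_low = 0x00ff00ffu, b_high = 0xff00ffffu;
    unsigned int r_low = a_low & b_low;		/* word 0 of the result */
    unsigned int r_high = a_high & b_high;	/* word 1 of the result */
    /* (r_high:r_low) equals the 64-bit AND of the operands:
       0x12000000_000000ff.  */
  }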
1343 /* Synthesize double word shifts from single word shifts. */
1344 if ((binoptab == lshr_optab || binoptab == ashl_optab
1345 || binoptab == ashr_optab)
1346 && class == MODE_INT
1347 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1348 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1349 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1350 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1351 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1353 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1354 enum machine_mode op1_mode;
1356 double_shift_mask = targetm.shift_truncation_mask (mode);
1357 shift_mask = targetm.shift_truncation_mask (word_mode);
1358 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1360 /* Apply the truncation to constant shifts. */
1361 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1362 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1364 if (op1 == CONST0_RTX (op1_mode))
1365 return op0;
1367 /* Make sure that this is a combination that expand_doubleword_shift
1368 can handle. See the comments there for details. */
1369 if (double_shift_mask == 0
1370 || (shift_mask == BITS_PER_WORD - 1
1371 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1373 rtx insns, equiv_value;
1374 rtx into_target, outof_target;
1375 rtx into_input, outof_input;
1376 int left_shift, outof_word;
1378 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1379 won't be accurate, so use a new target. */
1380 if (target == 0 || target == op0 || target == op1)
1381 target = gen_reg_rtx (mode);
1383 start_sequence ();
1385 /* OUTOF_* is the word we are shifting bits away from, and
1386 INTO_* is the word that we are shifting bits towards, thus
1387 they differ depending on the direction of the shift and
1388 WORDS_BIG_ENDIAN. */
1390 left_shift = binoptab == ashl_optab;
1391 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1393 outof_target = operand_subword (target, outof_word, 1, mode);
1394 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1396 outof_input = operand_subword_force (op0, outof_word, mode);
1397 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1399 if (expand_doubleword_shift (op1_mode, binoptab,
1400 outof_input, into_input, op1,
1401 outof_target, into_target,
1402 unsignedp, methods, shift_mask))
1404 insns = get_insns ();
1405 end_sequence ();
1407 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1408 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1409 return target;
1411 end_sequence ();
1415 /* Synthesize double word rotates from single word shifts. */
1416 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1417 && class == MODE_INT
1418 && GET_CODE (op1) == CONST_INT
1419 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1420 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1421 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1423 rtx insns;
1424 rtx into_target, outof_target;
1425 rtx into_input, outof_input;
1426 rtx inter;
1427 int shift_count, left_shift, outof_word;
1429 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1430 won't be accurate, so use a new target. Do this also if target is not
1431 a REG, first because having a register instead may open optimization
1432 opportunities, and second because if target and op0 happen to be MEMs
1433 designating the same location, we would risk clobbering it too early
1434 in the code sequence we generate below. */
1435 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1436 target = gen_reg_rtx (mode);
1438 start_sequence ();
1440 shift_count = INTVAL (op1);
1442 /* OUTOF_* is the word we are shifting bits away from, and
1443 INTO_* is the word that we are shifting bits towards, thus
1444 they differ depending on the direction of the shift and
1445 WORDS_BIG_ENDIAN. */
1447 left_shift = (binoptab == rotl_optab);
1448 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1450 outof_target = operand_subword (target, outof_word, 1, mode);
1451 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1453 outof_input = operand_subword_force (op0, outof_word, mode);
1454 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1456 if (shift_count == BITS_PER_WORD)
1458 /* This is just a word swap. */
1459 emit_move_insn (outof_target, into_input);
1460 emit_move_insn (into_target, outof_input);
1461 inter = const0_rtx;
1463 else
1465 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1466 rtx first_shift_count, second_shift_count;
1467 optab reverse_unsigned_shift, unsigned_shift;
1469 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1470 ? lshr_optab : ashl_optab);
1472 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1473 ? ashl_optab : lshr_optab);
1475 if (shift_count > BITS_PER_WORD)
1477 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1478 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1480 else
1482 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1483 second_shift_count = GEN_INT (shift_count);
1486 into_temp1 = expand_binop (word_mode, unsigned_shift,
1487 outof_input, first_shift_count,
1488 NULL_RTX, unsignedp, next_methods);
1489 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1490 into_input, second_shift_count,
1491 NULL_RTX, unsignedp, next_methods);
1493 if (into_temp1 != 0 && into_temp2 != 0)
1494 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1495 into_target, unsignedp, next_methods);
1496 else
1497 inter = 0;
1499 if (inter != 0 && inter != into_target)
1500 emit_move_insn (into_target, inter);
1502 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1503 into_input, first_shift_count,
1504 NULL_RTX, unsignedp, next_methods);
1505 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1506 outof_input, second_shift_count,
1507 NULL_RTX, unsignedp, next_methods);
1509 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1510 inter = expand_binop (word_mode, ior_optab,
1511 outof_temp1, outof_temp2,
1512 outof_target, unsignedp, next_methods);
1514 if (inter != 0 && inter != outof_target)
1515 emit_move_insn (outof_target, inter);
1518 insns = get_insns ();
1519 end_sequence ();
1521 if (inter != 0)
1523 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1524 block to help the register allocator a bit. But a multi-word
1525 rotate will need all the input bits when setting the output
1526 bits, so there clearly is a conflict between the input and
1527 output registers. So we can't use a no-conflict block here. */
1528 emit_insn (insns);
1529 return target;
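  /* Editorial illustration (not part of optabs.c): a constant doubleword
     rotate composed from word shifts, in the spirit of the sequence above
     (this sketch normalizes a count above BITS_PER_WORD with a word swap,
     where the code above instead adjusts the two shift counts).  Assumes
     32-bit words and an example count of 40.  */
  {
    unsigned int low_in = 0x89abcdefu, high_in = 0x01234567u;
    unsigned int count = 40;

    /* rotl by 40 == word swap (rotl by 32), then rotl by 8.  */
    unsigned int l = high_in, h = low_in, c = count - 32;

    unsigned int high_out = (h << c) | (l >> (32 - c));
    unsigned int low_out = (l << c) | (h >> (32 - c));
    /* Result: high_out:low_out == 0xabcdef01:0x23456789.  */
  }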
1533 /* These can be done a word at a time by propagating carries. */
1534 if ((binoptab == add_optab || binoptab == sub_optab)
1535 && class == MODE_INT
1536 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1537 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1539 unsigned int i;
1540 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1541 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1542 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1543 rtx xop0, xop1, xtarget;
1545 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG_VALUE
1546 is one of those, use it. Otherwise, use 1 since it is the
1547 one easiest to get. */
1548 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1549 int normalizep = STORE_FLAG_VALUE;
1550 #else
1551 int normalizep = 1;
1552 #endif
1554 /* Prepare the operands. */
1555 xop0 = force_reg (mode, op0);
1556 xop1 = force_reg (mode, op1);
1558 xtarget = gen_reg_rtx (mode);
1560 if (target == 0 || !REG_P (target))
1561 target = xtarget;
1563 /* Indicate for flow that the entire target reg is being set. */
1564 if (REG_P (target))
1565 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1567 /* Do the actual arithmetic. */
1568 for (i = 0; i < nwords; i++)
1570 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1571 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1572 rtx op0_piece = operand_subword_force (xop0, index, mode);
1573 rtx op1_piece = operand_subword_force (xop1, index, mode);
1574 rtx x;
1576 /* Main add/subtract of the input operands. */
1577 x = expand_binop (word_mode, binoptab,
1578 op0_piece, op1_piece,
1579 target_piece, unsignedp, next_methods);
1580 if (x == 0)
1581 break;
1583 if (i + 1 < nwords)
1585 /* Store carry from main add/subtract. */
1586 carry_out = gen_reg_rtx (word_mode);
1587 carry_out = emit_store_flag_force (carry_out,
1588 (binoptab == add_optab
1589 ? LT : GT),
1590 x, op0_piece,
1591 word_mode, 1, normalizep);
1594 if (i > 0)
1596 rtx newx;
1598 /* Add/subtract previous carry to main result. */
1599 newx = expand_binop (word_mode,
1600 normalizep == 1 ? binoptab : otheroptab,
1601 x, carry_in,
1602 NULL_RTX, 1, next_methods);
1604 if (i + 1 < nwords)
1606 /* Get out carry from adding/subtracting carry in. */
1607 rtx carry_tmp = gen_reg_rtx (word_mode);
1608 carry_tmp = emit_store_flag_force (carry_tmp,
1609 (binoptab == add_optab
1610 ? LT : GT),
1611 newx, x,
1612 word_mode, 1, normalizep);
1614 /* Logical-ior the two possible carries together. */
1615 carry_out = expand_binop (word_mode, ior_optab,
1616 carry_out, carry_tmp,
1617 carry_out, 0, next_methods);
1618 if (carry_out == 0)
1619 break;
1621 emit_move_insn (target_piece, newx);
1623 else
1625 if (x != target_piece)
1626 emit_move_insn (target_piece, x);
1629 carry_in = carry_out;
1632 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1634 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1635 || ! rtx_equal_p (target, xtarget))
1637 rtx temp = emit_move_insn (target, xtarget);
1639 set_unique_reg_note (temp,
1640 REG_EQUAL,
1641 gen_rtx_fmt_ee (binoptab->code, mode,
1642 copy_rtx (xop0),
1643 copy_rtx (xop1)));
1645 else
1646 target = xtarget;
1648 return target;
1651 else
1652 delete_insns_since (last);
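  /* Editorial illustration (not part of optabs.c): recovering the carry
     with a comparison, which is what the emit_store_flag_force calls above
     compute.  The sketch assumes 32-bit words, least significant word
     first, with example values chosen for the illustration.  */
  {
    unsigned int op0_w[2] = { 0xffffffffu, 0x00000001u };
    unsigned int op1_w[2] = { 0x00000001u, 0x00000002u };
    unsigned int res_w[2];
    unsigned int carry_in = 0, i;

    for (i = 0; i < 2; i++)
      {
	unsigned int x = op0_w[i] + op1_w[i];
	unsigned int carry_out = x < op0_w[i];	/* carry from the main add */
	unsigned int newx = x + carry_in;

	carry_out |= newx < x;			/* carry from adding CARRY_IN */
	res_w[i] = newx;
	carry_in = carry_out;
      }
    /* res_w is { 0x00000000, 0x00000004 }: 0x1ffffffff + 0x200000001.  */
  }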
1655 /* Attempt to synthesize double word multiplies using a sequence of word
1656 mode multiplications. We first attempt to generate a sequence using a
1657 more efficient unsigned widening multiply, and if that fails we then
1658 try using a signed widening multiply. */
1660 if (binoptab == smul_optab
1661 && class == MODE_INT
1662 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1663 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1664 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1666 rtx product = NULL_RTX;
1668 if (umul_widen_optab->handlers[(int) mode].insn_code
1669 != CODE_FOR_nothing)
1671 product = expand_doubleword_mult (mode, op0, op1, target,
1672 true, methods);
1673 if (!product)
1674 delete_insns_since (last);
1677 if (product == NULL_RTX
1678 && smul_widen_optab->handlers[(int) mode].insn_code
1679 != CODE_FOR_nothing)
1681 product = expand_doubleword_mult (mode, op0, op1, target,
1682 false, methods);
1683 if (!product)
1684 delete_insns_since (last);
1687 if (product != NULL_RTX)
1689 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1691 temp = emit_move_insn (target ? target : product, product);
1692 set_unique_reg_note (temp,
1693 REG_EQUAL,
1694 gen_rtx_fmt_ee (MULT, mode,
1695 copy_rtx (op0),
1696 copy_rtx (op1)));
1698 return product;
1702 /* It can't be open-coded in this mode.
1703 Use a library call if one is available and caller says that's ok. */
1705 if (binoptab->handlers[(int) mode].libfunc
1706 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1708 rtx insns;
1709 rtx op1x = op1;
1710 enum machine_mode op1_mode = mode;
1711 rtx value;
1713 start_sequence ();
1715 if (shift_op)
1717 op1_mode = word_mode;
1718 /* Specify unsigned here,
1719 since negative shift counts are meaningless. */
1720 op1x = convert_to_mode (word_mode, op1, 1);
1723 if (GET_MODE (op0) != VOIDmode
1724 && GET_MODE (op0) != mode)
1725 op0 = convert_to_mode (mode, op0, unsignedp);
1727 /* Pass 1 for NO_QUEUE so we don't lose any increments
1728 if the libcall is cse'd or moved. */
1729 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1730 NULL_RTX, LCT_CONST, mode, 2,
1731 op0, mode, op1x, op1_mode);
1733 insns = get_insns ();
1734 end_sequence ();
1736 target = gen_reg_rtx (mode);
1737 emit_libcall_block (insns, target, value,
1738 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1740 return target;
1743 delete_insns_since (last);
1745 /* It can't be done in this mode. Can we do it in a wider mode? */
1747 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1748 || methods == OPTAB_MUST_WIDEN))
1750 /* Caller says don't even try. */
1751 delete_insns_since (entry_last);
1752 return 0;
1755 /* Compute the value of METHODS to pass to recursive calls.
1756 Don't allow widening to be tried recursively. */
1758 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1760 /* Look for a wider mode of the same class for which it appears we can do
1761 the operation. */
1763 if (CLASS_HAS_WIDER_MODES_P (class))
1765 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1766 wider_mode != VOIDmode;
1767 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1769 if ((binoptab->handlers[(int) wider_mode].insn_code
1770 != CODE_FOR_nothing)
1771 || (methods == OPTAB_LIB
1772 && binoptab->handlers[(int) wider_mode].libfunc))
1774 rtx xop0 = op0, xop1 = op1;
1775 int no_extend = 0;
1777 /* For certain integer operations, we need not actually extend
1778 the narrow operands, as long as we will truncate
1779 the results to the same narrowness. */
1781 if ((binoptab == ior_optab || binoptab == and_optab
1782 || binoptab == xor_optab
1783 || binoptab == add_optab || binoptab == sub_optab
1784 || binoptab == smul_optab || binoptab == ashl_optab)
1785 && class == MODE_INT)
1786 no_extend = 1;
1788 xop0 = widen_operand (xop0, wider_mode, mode,
1789 unsignedp, no_extend);
1791 /* The second operand of a shift must always be extended. */
1792 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1793 no_extend && binoptab != ashl_optab);
1795 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1796 unsignedp, methods);
1797 if (temp)
1799 if (class != MODE_INT
1800 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1801 GET_MODE_BITSIZE (wider_mode)))
1803 if (target == 0)
1804 target = gen_reg_rtx (mode);
1805 convert_move (target, temp, 0);
1806 return target;
1808 else
1809 return gen_lowpart (mode, temp);
1811 else
1812 delete_insns_since (last);
1817 delete_insns_since (entry_last);
1818 return 0;
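/* Illustrative sketch, not part of the original file: the word-at-a-time
   expansions above rely on two arithmetic facts.  For multi-word add/sub,
   an unsigned word addition carried out of the word exactly when the
   result is smaller than an addend (that is what the emit_store_flag_force
   LT/GT tests compute), and for double-word multiply only three word
   multiplies are needed, because the high*high term falls entirely above
   the double-word result.  The sketch assumes a 32-bit unsigned int and a
   64-bit unsigned long long; it is not used by the compiler itself.  */

static void
sketch_doubleword_add (unsigned int a[2], const unsigned int b[2])
{
  unsigned int carry_in = 0;
  int i;

  for (i = 0; i < 2; i++)
    {
      unsigned int x = a[i] + b[i];
      unsigned int carry_out = x < a[i];	/* carry from the word add */
      unsigned int newx = x + carry_in;
      carry_out |= newx < x;			/* carry from adding carry_in */
      a[i] = newx;
      carry_in = carry_out;			/* the logical-ior above */
    }
}

static unsigned long long
sketch_doubleword_mult (unsigned long long u, unsigned long long v)
{
  unsigned int ul = (unsigned int) u, uh = (unsigned int) (u >> 32);
  unsigned int vl = (unsigned int) v, vh = (unsigned int) (v >> 32);

  /* One widening multiply of the low words ...  */
  unsigned long long product = (unsigned long long) ul * vl;

  /* ... plus the two cross products, which only affect the high word.
     uh * vh would land above bit 63 and is dropped, matching the
     truncating semantics of a double-word MULT.  */
  unsigned long long high = (unsigned long long) uh * vl
			    + (unsigned long long) ul * vh;
  return product + (high << 32);
}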
1821 /* Expand a binary operator which has both signed and unsigned forms.
1822 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1823 signed operations.
1825 If we widen unsigned operands, we may use a signed wider operation instead
1826 of an unsigned wider operation, since the result would be the same. */
1829 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
1830 rtx op0, rtx op1, rtx target, int unsignedp,
1831 enum optab_methods methods)
1833 rtx temp;
1834 optab direct_optab = unsignedp ? uoptab : soptab;
1835 struct optab wide_soptab;
1837 /* Do it without widening, if possible. */
1838 temp = expand_binop (mode, direct_optab, op0, op1, target,
1839 unsignedp, OPTAB_DIRECT);
1840 if (temp || methods == OPTAB_DIRECT)
1841 return temp;
1843 /* Try widening to a signed int. Make a fake signed optab that
1844 hides any signed insn for direct use. */
1845 wide_soptab = *soptab;
1846 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
1847 wide_soptab.handlers[(int) mode].libfunc = 0;
1849 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1850 unsignedp, OPTAB_WIDEN);
1852 /* For unsigned operands, try widening to an unsigned int. */
1853 if (temp == 0 && unsignedp)
1854 temp = expand_binop (mode, uoptab, op0, op1, target,
1855 unsignedp, OPTAB_WIDEN);
1856 if (temp || methods == OPTAB_WIDEN)
1857 return temp;
1859 /* Use the right-width lib call if one exists. */
1860 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
1861 if (temp || methods == OPTAB_LIB)
1862 return temp;
1864 /* Must widen and use a lib call; use either signed or unsigned. */
1865 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1866 unsignedp, methods);
1867 if (temp != 0)
1868 return temp;
1869 if (unsignedp)
1870 return expand_binop (mode, uoptab, op0, op1, target,
1871 unsignedp, methods);
1872 return 0;
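/* Minimal sketch, not from the original source: for the operations listed
   in the no_extend set of expand_binop (add, sub, mul and the bitwise ops),
   sign- and zero-extension of the operands give results that agree modulo
   2^N, so after truncating back to N bits it does not matter which
   extension was used.  This is why sign_expand_binop may route an unsigned
   operation through the *signed* wider optab.  N is 8 in this sketch; the
   function always returns 1.  */

static int
sketch_extension_agrees (unsigned char a, unsigned char b)
{
  int sa = a < 128 ? (int) a : (int) a - 256;	/* sign-extended image of A */
  int sb = b < 128 ? (int) b : (int) b - 256;	/* sign-extended image of B */

  unsigned int signed_prod = (unsigned int) (sa * sb);
  unsigned int unsigned_prod = (unsigned int) a * b;

  /* The two products differ only by multiples of 2^8, so their low
     8 bits are identical.  */
  return (unsigned char) signed_prod == (unsigned char) unsigned_prod;
}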
1875 /* Generate code to perform an operation specified by UNOPPTAB
1876 on operand OP0, with two results to TARG0 and TARG1.
1877 We assume that the order of the operands for the instruction
1878 is TARG0, TARG1, OP0.
1880 Either TARG0 or TARG1 may be zero, but what that means is that
1881 the result is not actually wanted. We will generate it into
1882 a dummy pseudo-reg and discard it. They may not both be zero.
1884 Returns 1 if this operation can be performed; 0 if not. */
1887 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
1888 int unsignedp)
1890 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1891 enum mode_class class;
1892 enum machine_mode wider_mode;
1893 rtx entry_last = get_last_insn ();
1894 rtx last;
1896 class = GET_MODE_CLASS (mode);
1898 if (!targ0)
1899 targ0 = gen_reg_rtx (mode);
1900 if (!targ1)
1901 targ1 = gen_reg_rtx (mode);
1903 /* Record where to go back to if we fail. */
1904 last = get_last_insn ();
1906 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1908 int icode = (int) unoptab->handlers[(int) mode].insn_code;
1909 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
1910 rtx pat;
1911 rtx xop0 = op0;
1913 if (GET_MODE (xop0) != VOIDmode
1914 && GET_MODE (xop0) != mode0)
1915 xop0 = convert_to_mode (mode0, xop0, unsignedp);
1917 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1918 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
1919 xop0 = copy_to_mode_reg (mode0, xop0);
1921 /* We could handle this, but we should always be called with a pseudo
1922 for our targets and all insns should take them as outputs. */
1923 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
1924 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
1926 pat = GEN_FCN (icode) (targ0, targ1, xop0);
1927 if (pat)
1929 emit_insn (pat);
1930 return 1;
1932 else
1933 delete_insns_since (last);
1936 /* It can't be done in this mode. Can we do it in a wider mode? */
1938 if (CLASS_HAS_WIDER_MODES_P (class))
1940 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1941 wider_mode != VOIDmode;
1942 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1944 if (unoptab->handlers[(int) wider_mode].insn_code
1945 != CODE_FOR_nothing)
1947 rtx t0 = gen_reg_rtx (wider_mode);
1948 rtx t1 = gen_reg_rtx (wider_mode);
1949 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1951 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1953 convert_move (targ0, t0, unsignedp);
1954 convert_move (targ1, t1, unsignedp);
1955 return 1;
1957 else
1958 delete_insns_since (last);
1963 delete_insns_since (entry_last);
1964 return 0;
1967 /* Generate code to perform an operation specified by BINOPTAB
1968 on operands OP0 and OP1, with two results to TARG0 and TARG1.
1969 We assume that the order of the operands for the instruction
1970 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1971 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1973 Either TARG0 or TARG1 may be zero, but what that means is that
1974 the result is not actually wanted. We will generate it into
1975 a dummy pseudo-reg and discard it. They may not both be zero.
1977 Returns 1 if this operation can be performed; 0 if not. */
1980 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
1981 int unsignedp)
1983 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1984 enum mode_class class;
1985 enum machine_mode wider_mode;
1986 rtx entry_last = get_last_insn ();
1987 rtx last;
1989 class = GET_MODE_CLASS (mode);
1991 /* If we are inside an appropriately-short loop and we are optimizing,
1992 force expensive constants into a register. */
1993 if (CONSTANT_P (op0) && optimize
1994 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1995 op0 = force_reg (mode, op0);
1997 if (CONSTANT_P (op1) && optimize
1998 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1999 op1 = force_reg (mode, op1);
2001 if (!targ0)
2002 targ0 = gen_reg_rtx (mode);
2003 if (!targ1)
2004 targ1 = gen_reg_rtx (mode);
2006 /* Record where to go back to if we fail. */
2007 last = get_last_insn ();
2009 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2011 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2012 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2013 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2014 rtx pat;
2015 rtx xop0 = op0, xop1 = op1;
2017 /* In case the insn wants input operands in modes different from
2018 those of the actual operands, convert the operands. It would
2019 seem that we don't need to convert CONST_INTs, but we do, so
2020 that they're properly zero-extended, sign-extended or truncated
2021 for their mode. */
2023 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2024 xop0 = convert_modes (mode0,
2025 GET_MODE (op0) != VOIDmode
2026 ? GET_MODE (op0)
2027 : mode,
2028 xop0, unsignedp);
2030 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2031 xop1 = convert_modes (mode1,
2032 GET_MODE (op1) != VOIDmode
2033 ? GET_MODE (op1)
2034 : mode,
2035 xop1, unsignedp);
2037 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2038 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2039 xop0 = copy_to_mode_reg (mode0, xop0);
2041 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2042 xop1 = copy_to_mode_reg (mode1, xop1);
2044 /* We could handle this, but we should always be called with a pseudo
2045 for our targets and all insns should take them as outputs. */
2046 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2047 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2049 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2050 if (pat)
2052 emit_insn (pat);
2053 return 1;
2055 else
2056 delete_insns_since (last);
2059 /* It can't be done in this mode. Can we do it in a wider mode? */
2061 if (CLASS_HAS_WIDER_MODES_P (class))
2063 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2064 wider_mode != VOIDmode;
2065 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2067 if (binoptab->handlers[(int) wider_mode].insn_code
2068 != CODE_FOR_nothing)
2070 rtx t0 = gen_reg_rtx (wider_mode);
2071 rtx t1 = gen_reg_rtx (wider_mode);
2072 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2073 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2075 if (expand_twoval_binop (binoptab, cop0, cop1,
2076 t0, t1, unsignedp))
2078 convert_move (targ0, t0, unsignedp);
2079 convert_move (targ1, t1, unsignedp);
2080 return 1;
2082 else
2083 delete_insns_since (last);
2088 delete_insns_since (entry_last);
2089 return 0;
2092 /* Expand the two-valued library call indicated by BINOPTAB, but
2093 preserve only one of the values. If TARG0 is non-NULL, the first
2094 value is placed into TARG0; otherwise the second value is placed
2095 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2096 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2097 This routine assumes that the value returned by the library call is
2098 as if the return value was of an integral mode twice as wide as the
2099 mode of OP0. Returns 1 if the call was successful. */
2101 bool
2102 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2103 rtx targ0, rtx targ1, enum rtx_code code)
2105 enum machine_mode mode;
2106 enum machine_mode libval_mode;
2107 rtx libval;
2108 rtx insns;
2110 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2111 gcc_assert (!targ0 != !targ1);
2113 mode = GET_MODE (op0);
2114 if (!binoptab->handlers[(int) mode].libfunc)
2115 return false;
2117 /* The value returned by the library function will have twice as
2118 many bits as the nominal MODE. */
2119 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2120 MODE_INT);
2121 start_sequence ();
2122 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2123 NULL_RTX, LCT_CONST,
2124 libval_mode, 2,
2125 op0, mode,
2126 op1, mode);
2127 /* Get the part of VAL containing the value that we want. */
2128 libval = simplify_gen_subreg (mode, libval, libval_mode,
2129 targ0 ? 0 : GET_MODE_SIZE (mode));
2130 insns = get_insns ();
2131 end_sequence ();
2132 /* Move the result into the desired location. */
2133 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2134 gen_rtx_fmt_ee (code, mode, op0, op1));
2136 return true;
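/* Minimal sketch, not part of the original file, of the return convention
   assumed above: the library routine yields both results packed into an
   integer twice as wide as MODE, and the caller extracts one half with a
   subreg at byte offset 0 or GET_MODE_SIZE (mode).  Which half lives at
   which byte offset depends on the target's endianness; this sketch simply
   uses the low and high halves of a 64-bit value for a 32-bit MODE, and
   the helper names are hypothetical.  */

static unsigned long long
sketch_pack_two_results (unsigned int first, unsigned int second)
{
  return (unsigned long long) first | ((unsigned long long) second << 32);
}

static unsigned int
sketch_extract_result (unsigned long long packed, int want_second)
{
  return want_second ? (unsigned int) (packed >> 32) : (unsigned int) packed;
}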
2140 /* Wrapper around expand_unop which takes an rtx code to specify
2141 the operation to perform, not an optab pointer. All other
2142 arguments are the same. */
2144 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2145 rtx target, int unsignedp)
2147 optab unop = code_to_optab[(int) code];
2148 gcc_assert (unop);
2150 return expand_unop (mode, unop, op0, target, unsignedp);
2153 /* Try calculating
2154 (clz:narrow x)
2155 as
2156 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2157 static rtx
2158 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2160 enum mode_class class = GET_MODE_CLASS (mode);
2161 if (CLASS_HAS_WIDER_MODES_P (class))
2163 enum machine_mode wider_mode;
2164 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2165 wider_mode != VOIDmode;
2166 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2168 if (clz_optab->handlers[(int) wider_mode].insn_code
2169 != CODE_FOR_nothing)
2171 rtx xop0, temp, last;
2173 last = get_last_insn ();
2175 if (target == 0)
2176 target = gen_reg_rtx (mode);
2177 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2178 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2179 if (temp != 0)
2180 temp = expand_binop (wider_mode, sub_optab, temp,
2181 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2182 - GET_MODE_BITSIZE (mode)),
2183 target, true, OPTAB_DIRECT);
2184 if (temp == 0)
2185 delete_insns_since (last);
2187 return temp;
2191 return 0;
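/* Minimal sketch, not from the original source, of the identity used by
   widen_clz: clz of a narrow value can be computed with a wider clz by
   zero-extending and then subtracting the difference in bit widths.
   Assumes 32-bit unsigned int, 64-bit unsigned long long, a nonzero
   argument, and the availability of GCC's __builtin_clzll.  */

static int
sketch_clz32_via_clz64 (unsigned int x)
{
  /* x must be nonzero; clz of zero is undefined here.  */
  return __builtin_clzll ((unsigned long long) x) - (64 - 32);
}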
2194 /* Try calculating (parity x) as (and (popcount x) 1), where
2195 popcount can also be done in a wider mode. */
2196 static rtx
2197 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2199 enum mode_class class = GET_MODE_CLASS (mode);
2200 if (CLASS_HAS_WIDER_MODES_P (class))
2202 enum machine_mode wider_mode;
2203 for (wider_mode = mode; wider_mode != VOIDmode;
2204 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2206 if (popcount_optab->handlers[(int) wider_mode].insn_code
2207 != CODE_FOR_nothing)
2209 rtx xop0, temp, last;
2211 last = get_last_insn ();
2213 if (target == 0)
2214 target = gen_reg_rtx (mode);
2215 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2216 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2217 true);
2218 if (temp != 0)
2219 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2220 target, true, OPTAB_DIRECT);
2221 if (temp == 0)
2222 delete_insns_since (last);
2224 return temp;
2228 return 0;
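/* Minimal sketch, not from the original source: parity is the low bit of
   the population count, and the popcount may itself be taken in a wider
   mode after zero-extension without changing that bit.  Assumes GCC's
   __builtin_popcountll is available.  */

static int
sketch_parity32 (unsigned int x)
{
  return __builtin_popcountll ((unsigned long long) x) & 1;
}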
2231 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2232 conditions, VAL may already be a SUBREG against which we cannot generate
2233 a further SUBREG. In this case, we expect forcing the value into a
2234 register will work around the situation. */
2236 static rtx
2237 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2238 enum machine_mode imode)
2240 rtx ret;
2241 ret = lowpart_subreg (omode, val, imode);
2242 if (ret == NULL)
2244 val = force_reg (imode, val);
2245 ret = lowpart_subreg (omode, val, imode);
2246 gcc_assert (ret != NULL);
2248 return ret;
2251 /* Expand a floating point absolute value or negation operation via a
2252 logical operation on the sign bit. */
2254 static rtx
2255 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2256 rtx op0, rtx target)
2258 const struct real_format *fmt;
2259 int bitpos, word, nwords, i;
2260 enum machine_mode imode;
2261 HOST_WIDE_INT hi, lo;
2262 rtx temp, insns;
2264 /* The format has to have a simple sign bit. */
2265 fmt = REAL_MODE_FORMAT (mode);
2266 if (fmt == NULL)
2267 return NULL_RTX;
2269 bitpos = fmt->signbit_rw;
2270 if (bitpos < 0)
2271 return NULL_RTX;
2273 /* Don't create negative zeros if the format doesn't support them. */
2274 if (code == NEG && !fmt->has_signed_zero)
2275 return NULL_RTX;
2277 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2279 imode = int_mode_for_mode (mode);
2280 if (imode == BLKmode)
2281 return NULL_RTX;
2282 word = 0;
2283 nwords = 1;
2285 else
2287 imode = word_mode;
2289 if (FLOAT_WORDS_BIG_ENDIAN)
2290 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2291 else
2292 word = bitpos / BITS_PER_WORD;
2293 bitpos = bitpos % BITS_PER_WORD;
2294 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2297 if (bitpos < HOST_BITS_PER_WIDE_INT)
2299 hi = 0;
2300 lo = (HOST_WIDE_INT) 1 << bitpos;
2302 else
2304 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2305 lo = 0;
2307 if (code == ABS)
2308 lo = ~lo, hi = ~hi;
2310 if (target == 0 || target == op0)
2311 target = gen_reg_rtx (mode);
2313 if (nwords > 1)
2315 start_sequence ();
2317 for (i = 0; i < nwords; ++i)
2319 rtx targ_piece = operand_subword (target, i, 1, mode);
2320 rtx op0_piece = operand_subword_force (op0, i, mode);
2322 if (i == word)
2324 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2325 op0_piece,
2326 immed_double_const (lo, hi, imode),
2327 targ_piece, 1, OPTAB_LIB_WIDEN);
2328 if (temp != targ_piece)
2329 emit_move_insn (targ_piece, temp);
2331 else
2332 emit_move_insn (targ_piece, op0_piece);
2335 insns = get_insns ();
2336 end_sequence ();
2338 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2339 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2341 else
2343 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2344 gen_lowpart (imode, op0),
2345 immed_double_const (lo, hi, imode),
2346 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2347 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2349 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2350 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2353 return target;
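/* Minimal sketch, not part of the original file, of the bit trick used by
   expand_absneg_bit, assuming a 32-bit floating point format with the sign
   in bit 31: ABS clears the sign bit with AND (mask inverted, as above),
   NEG flips it with XOR, both acting on the integer image of the float
   exactly as the and_optab/xor_optab expansion does.  */

static unsigned int
sketch_float_bits_abs (unsigned int float_bits)
{
  return float_bits & ~(1u << 31);	/* code == ABS */
}

static unsigned int
sketch_float_bits_neg (unsigned int float_bits)
{
  return float_bits ^ (1u << 31);	/* code == NEG */
}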
2356 /* Generate code to perform an operation specified by UNOPTAB
2357 on operand OP0, with result having machine-mode MODE.
2359 UNSIGNEDP is for the case where we have to widen the operands
2360 to perform the operation. It says to use zero-extension.
2362 If TARGET is nonzero, the value
2363 is generated there, if it is convenient to do so.
2364 In all cases an rtx is returned for the locus of the value;
2365 this may or may not be TARGET. */
2368 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2369 int unsignedp)
2371 enum mode_class class;
2372 enum machine_mode wider_mode;
2373 rtx temp;
2374 rtx last = get_last_insn ();
2375 rtx pat;
2377 class = GET_MODE_CLASS (mode);
2379 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2381 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2382 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2383 rtx xop0 = op0;
2385 if (target)
2386 temp = target;
2387 else
2388 temp = gen_reg_rtx (mode);
2390 if (GET_MODE (xop0) != VOIDmode
2391 && GET_MODE (xop0) != mode0)
2392 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2394 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2396 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2397 xop0 = copy_to_mode_reg (mode0, xop0);
2399 if (!insn_data[icode].operand[0].predicate (temp, mode))
2400 temp = gen_reg_rtx (mode);
2402 pat = GEN_FCN (icode) (temp, xop0);
2403 if (pat)
2405 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2406 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2408 delete_insns_since (last);
2409 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2412 emit_insn (pat);
2414 return temp;
2416 else
2417 delete_insns_since (last);
2420 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2422 /* Widening clz needs special treatment. */
2423 if (unoptab == clz_optab)
2425 temp = widen_clz (mode, op0, target);
2426 if (temp)
2427 return temp;
2428 else
2429 goto try_libcall;
2432 if (CLASS_HAS_WIDER_MODES_P (class))
2433 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2434 wider_mode != VOIDmode;
2435 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2437 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2439 rtx xop0 = op0;
2441 /* For certain operations, we need not actually extend
2442 the narrow operand, as long as we will truncate the
2443 results to the same narrowness. */
2445 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2446 (unoptab == neg_optab
2447 || unoptab == one_cmpl_optab)
2448 && class == MODE_INT);
2450 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2451 unsignedp);
2453 if (temp)
2455 if (class != MODE_INT
2456 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2457 GET_MODE_BITSIZE (wider_mode)))
2459 if (target == 0)
2460 target = gen_reg_rtx (mode);
2461 convert_move (target, temp, 0);
2462 return target;
2464 else
2465 return gen_lowpart (mode, temp);
2467 else
2468 delete_insns_since (last);
2472 /* These can be done a word at a time. */
2473 if (unoptab == one_cmpl_optab
2474 && class == MODE_INT
2475 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2476 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2478 int i;
2479 rtx insns;
2481 if (target == 0 || target == op0)
2482 target = gen_reg_rtx (mode);
2484 start_sequence ();
2486 /* Do the actual arithmetic. */
2487 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2489 rtx target_piece = operand_subword (target, i, 1, mode);
2490 rtx x = expand_unop (word_mode, unoptab,
2491 operand_subword_force (op0, i, mode),
2492 target_piece, unsignedp);
2494 if (target_piece != x)
2495 emit_move_insn (target_piece, x);
2498 insns = get_insns ();
2499 end_sequence ();
2501 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2502 gen_rtx_fmt_e (unoptab->code, mode,
2503 copy_rtx (op0)));
2504 return target;
2507 if (unoptab->code == NEG)
2509 /* Try negating floating point values by flipping the sign bit. */
2510 if (SCALAR_FLOAT_MODE_P (mode))
2512 temp = expand_absneg_bit (NEG, mode, op0, target);
2513 if (temp)
2514 return temp;
2517 /* If there is no negation pattern, and we have no negative zero,
2518 try subtracting from zero. */
2519 if (!HONOR_SIGNED_ZEROS (mode))
2521 temp = expand_binop (mode, (unoptab == negv_optab
2522 ? subv_optab : sub_optab),
2523 CONST0_RTX (mode), op0, target,
2524 unsignedp, OPTAB_DIRECT);
2525 if (temp)
2526 return temp;
2530 /* Try calculating parity (x) as popcount (x) % 2. */
2531 if (unoptab == parity_optab)
2533 temp = expand_parity (mode, op0, target);
2534 if (temp)
2535 return temp;
2538 try_libcall:
2539 /* Now try a library call in this mode. */
2540 if (unoptab->handlers[(int) mode].libfunc)
2542 rtx insns;
2543 rtx value;
2544 enum machine_mode outmode = mode;
2546 /* All of these functions return small values. Thus we choose to
2547 have them return something that isn't a double-word. */
2548 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2549 || unoptab == popcount_optab || unoptab == parity_optab)
2550 outmode
2551 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2553 start_sequence ();
2555 /* Pass 1 for NO_QUEUE so we don't lose any increments
2556 if the libcall is cse'd or moved. */
2557 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2558 NULL_RTX, LCT_CONST, outmode,
2559 1, op0, mode);
2560 insns = get_insns ();
2561 end_sequence ();
2563 target = gen_reg_rtx (outmode);
2564 emit_libcall_block (insns, target, value,
2565 gen_rtx_fmt_e (unoptab->code, mode, op0));
2567 return target;
2570 /* It can't be done in this mode. Can we do it in a wider mode? */
2572 if (CLASS_HAS_WIDER_MODES_P (class))
2574 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2575 wider_mode != VOIDmode;
2576 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2578 if ((unoptab->handlers[(int) wider_mode].insn_code
2579 != CODE_FOR_nothing)
2580 || unoptab->handlers[(int) wider_mode].libfunc)
2582 rtx xop0 = op0;
2584 /* For certain operations, we need not actually extend
2585 the narrow operand, as long as we will truncate the
2586 results to the same narrowness. */
2588 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2589 (unoptab == neg_optab
2590 || unoptab == one_cmpl_optab)
2591 && class == MODE_INT);
2593 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2594 unsignedp);
2596 /* If we are generating clz using wider mode, adjust the
2597 result. */
2598 if (unoptab == clz_optab && temp != 0)
2599 temp = expand_binop (wider_mode, sub_optab, temp,
2600 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2601 - GET_MODE_BITSIZE (mode)),
2602 target, true, OPTAB_DIRECT);
2604 if (temp)
2606 if (class != MODE_INT)
2608 if (target == 0)
2609 target = gen_reg_rtx (mode);
2610 convert_move (target, temp, 0);
2611 return target;
2613 else
2614 return gen_lowpart (mode, temp);
2616 else
2617 delete_insns_since (last);
2622 /* One final attempt at implementing negation via subtraction,
2623 this time allowing widening of the operand. */
2624 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2626 rtx temp;
2627 temp = expand_binop (mode,
2628 unoptab == negv_optab ? subv_optab : sub_optab,
2629 CONST0_RTX (mode), op0,
2630 target, unsignedp, OPTAB_LIB_WIDEN);
2631 if (temp)
2632 return temp;
2635 return 0;
2638 /* Emit code to compute the absolute value of OP0, with result to
2639 TARGET if convenient. (TARGET may be 0.) The return value says
2640 where the result actually is to be found.
2642 MODE is the mode of the operand; the mode of the result is
2643 different but can be deduced from MODE.
2648 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2649 int result_unsignedp)
2651 rtx temp;
2653 if (! flag_trapv)
2654 result_unsignedp = 1;
2656 /* First try to do it with a special abs instruction. */
2657 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2658 op0, target, 0);
2659 if (temp != 0)
2660 return temp;
2662 /* For floating point modes, try clearing the sign bit. */
2663 if (SCALAR_FLOAT_MODE_P (mode))
2665 temp = expand_absneg_bit (ABS, mode, op0, target);
2666 if (temp)
2667 return temp;
2670 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2671 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2672 && !HONOR_SIGNED_ZEROS (mode))
2674 rtx last = get_last_insn ();
2676 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2677 if (temp != 0)
2678 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2679 OPTAB_WIDEN);
2681 if (temp != 0)
2682 return temp;
2684 delete_insns_since (last);
2687 /* If this machine has expensive jumps, we can do integer absolute
2688 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2689 where W is the width of MODE. */
2691 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2693 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2694 size_int (GET_MODE_BITSIZE (mode) - 1),
2695 NULL_RTX, 0);
2697 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2698 OPTAB_LIB_WIDEN);
2699 if (temp != 0)
2700 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2701 temp, extended, target, 0, OPTAB_LIB_WIDEN);
2703 if (temp != 0)
2704 return temp;
2707 return NULL_RTX;
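/* Minimal sketch, not from the original source, of the branch-free integer
   abs used above when jumps are expensive: with E = x >> (W-1) (an
   arithmetic shift giving 0 or -1), abs(x) = (x ^ E) - E.  The sketch
   assumes a 32-bit int with an arithmetic right shift of negative values,
   and, like the RTL version, it returns INT_MIN for INT_MIN.  */

static int
sketch_abs32 (int x)
{
  int extended = x >> 31;	/* 0 if x >= 0, -1 if x < 0 */
  return (x ^ extended) - extended;
}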
2711 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2712 int result_unsignedp, int safe)
2714 rtx temp, op1;
2716 if (! flag_trapv)
2717 result_unsignedp = 1;
2719 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2720 if (temp != 0)
2721 return temp;
2723 /* If that does not win, use conditional jump and negate. */
2725 /* It is safe to use the target if it is the same
2726 as the source, provided it is also a pseudo register. */
2727 if (op0 == target && REG_P (op0)
2728 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
2729 safe = 1;
2731 op1 = gen_label_rtx ();
2732 if (target == 0 || ! safe
2733 || GET_MODE (target) != mode
2734 || (MEM_P (target) && MEM_VOLATILE_P (target))
2735 || (REG_P (target)
2736 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2737 target = gen_reg_rtx (mode);
2739 emit_move_insn (target, op0);
2740 NO_DEFER_POP;
2742 /* If this mode is an integer too wide to compare properly,
2743 compare word by word. Rely on CSE to optimize constant cases. */
2744 if (GET_MODE_CLASS (mode) == MODE_INT
2745 && ! can_compare_p (GE, mode, ccp_jump))
2746 do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
2747 NULL_RTX, op1);
2748 else
2749 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2750 NULL_RTX, NULL_RTX, op1);
2752 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2753 target, target, 0);
2754 if (op0 != target)
2755 emit_move_insn (target, op0);
2756 emit_label (op1);
2757 OK_DEFER_POP;
2758 return target;
2761 /* A subroutine of expand_copysign, perform the copysign operation using the
2762 abs and neg primitives advertised to exist on the target. The assumption
2763 is that we have a split register file, and leaving op0 in fp registers,
2764 and not playing with subregs so much, will help the register allocator. */
2766 static rtx
2767 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2768 int bitpos, bool op0_is_abs)
2770 enum machine_mode imode;
2771 HOST_WIDE_INT hi, lo;
2772 int word;
2773 rtx label;
2775 if (target == op1)
2776 target = NULL_RTX;
2778 if (!op0_is_abs)
2780 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2781 if (op0 == NULL)
2782 return NULL_RTX;
2783 target = op0;
2785 else
2787 if (target == NULL_RTX)
2788 target = copy_to_reg (op0);
2789 else
2790 emit_move_insn (target, op0);
2793 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2795 imode = int_mode_for_mode (mode);
2796 if (imode == BLKmode)
2797 return NULL_RTX;
2798 op1 = gen_lowpart (imode, op1);
2800 else
2802 imode = word_mode;
2803 if (FLOAT_WORDS_BIG_ENDIAN)
2804 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2805 else
2806 word = bitpos / BITS_PER_WORD;
2807 bitpos = bitpos % BITS_PER_WORD;
2808 op1 = operand_subword_force (op1, word, mode);
2811 if (bitpos < HOST_BITS_PER_WIDE_INT)
2813 hi = 0;
2814 lo = (HOST_WIDE_INT) 1 << bitpos;
2816 else
2818 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2819 lo = 0;
2822 op1 = expand_binop (imode, and_optab, op1,
2823 immed_double_const (lo, hi, imode),
2824 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2826 label = gen_label_rtx ();
2827 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
2829 if (GET_CODE (op0) == CONST_DOUBLE)
2830 op0 = simplify_unary_operation (NEG, mode, op0, mode);
2831 else
2832 op0 = expand_unop (mode, neg_optab, op0, target, 0);
2833 if (op0 != target)
2834 emit_move_insn (target, op0);
2836 emit_label (label);
2838 return target;
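/* Minimal sketch, not part of the original file, of the strategy above:
   starting from a value already known to be nonnegative (op0_is_abs, or
   the result of the abs_optab expansion), inspect the sign bit of the
   integer image of op1 and negate only when it is set.  A 32-bit format
   with the sign in bit 31 is assumed, and y_bits stands for the lowpart or
   subword of op1 extracted above.  */

static float
sketch_copysign_absneg (float abs_x, unsigned int y_bits)
{
  if (y_bits & (1u << 31))	/* the emit_cmp_and_jump_insns test */
    return -abs_x;
  return abs_x;
}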
2842 /* A subroutine of expand_copysign, perform the entire copysign operation
2843 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2844 is true if op0 is known to have its sign bit clear. */
2846 static rtx
2847 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2848 int bitpos, bool op0_is_abs)
2850 enum machine_mode imode;
2851 HOST_WIDE_INT hi, lo;
2852 int word, nwords, i;
2853 rtx temp, insns;
2855 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2857 imode = int_mode_for_mode (mode);
2858 if (imode == BLKmode)
2859 return NULL_RTX;
2860 word = 0;
2861 nwords = 1;
2863 else
2865 imode = word_mode;
2867 if (FLOAT_WORDS_BIG_ENDIAN)
2868 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2869 else
2870 word = bitpos / BITS_PER_WORD;
2871 bitpos = bitpos % BITS_PER_WORD;
2872 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2875 if (bitpos < HOST_BITS_PER_WIDE_INT)
2877 hi = 0;
2878 lo = (HOST_WIDE_INT) 1 << bitpos;
2880 else
2882 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2883 lo = 0;
2886 if (target == 0 || target == op0 || target == op1)
2887 target = gen_reg_rtx (mode);
2889 if (nwords > 1)
2891 start_sequence ();
2893 for (i = 0; i < nwords; ++i)
2895 rtx targ_piece = operand_subword (target, i, 1, mode);
2896 rtx op0_piece = operand_subword_force (op0, i, mode);
2898 if (i == word)
2900 if (!op0_is_abs)
2901 op0_piece = expand_binop (imode, and_optab, op0_piece,
2902 immed_double_const (~lo, ~hi, imode),
2903 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2905 op1 = expand_binop (imode, and_optab,
2906 operand_subword_force (op1, i, mode),
2907 immed_double_const (lo, hi, imode),
2908 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2910 temp = expand_binop (imode, ior_optab, op0_piece, op1,
2911 targ_piece, 1, OPTAB_LIB_WIDEN);
2912 if (temp != targ_piece)
2913 emit_move_insn (targ_piece, temp);
2915 else
2916 emit_move_insn (targ_piece, op0_piece);
2919 insns = get_insns ();
2920 end_sequence ();
2922 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
2924 else
2926 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
2927 immed_double_const (lo, hi, imode),
2928 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2930 op0 = gen_lowpart (imode, op0);
2931 if (!op0_is_abs)
2932 op0 = expand_binop (imode, and_optab, op0,
2933 immed_double_const (~lo, ~hi, imode),
2934 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2936 temp = expand_binop (imode, ior_optab, op0, op1,
2937 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2938 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2941 return target;
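/* Minimal sketch, not from the original source, of the mask form used by
   expand_copysign_bit, acting on the integer images of the operands with
   the sign assumed in bit 31: keep everything but op0's sign, take only
   op1's sign, and OR the two, i.e. (op0 & ~mask) | (op1 & mask).  */

static unsigned int
sketch_copysign_bits (unsigned int x_bits, unsigned int y_bits)
{
  const unsigned int sign = 1u << 31;
  return (x_bits & ~sign) | (y_bits & sign);
}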
2944 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2945 scalar floating point mode. Return NULL if we do not know how to
2946 expand the operation inline. */
2949 expand_copysign (rtx op0, rtx op1, rtx target)
2951 enum machine_mode mode = GET_MODE (op0);
2952 const struct real_format *fmt;
2953 bool op0_is_abs;
2954 rtx temp;
2956 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
2957 gcc_assert (GET_MODE (op1) == mode);
2959 /* First try to do it with a special instruction. */
2960 temp = expand_binop (mode, copysign_optab, op0, op1,
2961 target, 0, OPTAB_DIRECT);
2962 if (temp)
2963 return temp;
2965 fmt = REAL_MODE_FORMAT (mode);
2966 if (fmt == NULL || !fmt->has_signed_zero)
2967 return NULL_RTX;
2969 op0_is_abs = false;
2970 if (GET_CODE (op0) == CONST_DOUBLE)
2972 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
2973 op0 = simplify_unary_operation (ABS, mode, op0, mode);
2974 op0_is_abs = true;
2977 if (fmt->signbit_ro >= 0
2978 && (GET_CODE (op0) == CONST_DOUBLE
2979 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
2980 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
2982 temp = expand_copysign_absneg (mode, op0, op1, target,
2983 fmt->signbit_ro, op0_is_abs);
2984 if (temp)
2985 return temp;
2988 if (fmt->signbit_rw < 0)
2989 return NULL_RTX;
2990 return expand_copysign_bit (mode, op0, op1, target,
2991 fmt->signbit_rw, op0_is_abs);
2994 /* Generate an instruction whose insn-code is INSN_CODE,
2995 with two operands: an output TARGET and an input OP0.
2996 TARGET *must* be nonzero, and the output is always stored there.
2997 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2998 the value that is stored into TARGET. */
3000 void
3001 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3003 rtx temp;
3004 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3005 rtx pat;
3007 temp = target;
3009 /* Now, if insn does not accept our operands, put them into pseudos. */
3011 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3012 op0 = copy_to_mode_reg (mode0, op0);
3014 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3015 temp = gen_reg_rtx (GET_MODE (temp));
3017 pat = GEN_FCN (icode) (temp, op0);
3019 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3020 add_equal_note (pat, temp, code, op0, NULL_RTX);
3022 emit_insn (pat);
3024 if (temp != target)
3025 emit_move_insn (target, temp);
3028 struct no_conflict_data
3030 rtx target, first, insn;
3031 bool must_stay;
3034 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3035 Set P->must_stay if the currently examined clobber / store has to stay
3036 in the list of insns that constitute the actual no_conflict block /
3037 libcall block. */
3038 static void
3039 no_conflict_move_test (rtx dest, rtx set, void *p0)
3041 struct no_conflict_data *p = p0;
3043 /* If this insn directly contributes to setting the target, it must stay. */
3044 if (reg_overlap_mentioned_p (p->target, dest))
3045 p->must_stay = true;
3046 /* If we haven't committed to keeping any other insns in the list yet,
3047 there is nothing more to check. */
3048 else if (p->insn == p->first)
3049 return;
3050 /* If this insn sets / clobbers a register that feeds one of the insns
3051 already in the list, this insn has to stay too. */
3052 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3053 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3054 || reg_used_between_p (dest, p->first, p->insn)
3055 /* Likewise if this insn depends on a register set by a previous
3056 insn in the list, or if it sets a result (presumably a hard
3057 register) that is set or clobbered by a previous insn.
3058 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3059 SET_DEST perform the former check on the address, and the latter
3060 check on the MEM. */
3061 || (GET_CODE (set) == SET
3062 && (modified_in_p (SET_SRC (set), p->first)
3063 || modified_in_p (SET_DEST (set), p->first)
3064 || modified_between_p (SET_SRC (set), p->first, p->insn)
3065 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3066 p->must_stay = true;
3069 /* Emit code to perform a series of operations on a multi-word quantity, one
3070 word at a time.
3072 Such a block is preceded by a CLOBBER of the output, consists of multiple
3073 insns, each setting one word of the output, and followed by a SET copying
3074 the output to itself.
3076 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3077 note indicating that it doesn't conflict with the (also multi-word)
3078 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3079 notes.
3081 INSNS is a block of code generated to perform the operation, not including
3082 the CLOBBER and final copy. All insns that compute intermediate values
3083 are first emitted, followed by the block as described above.
3085 TARGET, OP0, and OP1 are the output and inputs of the operations,
3086 respectively. OP1 may be zero for a unary operation.
3088 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3089 on the last insn.
3091 If TARGET is not a register, INSNS is simply emitted with no special
3092 processing. Likewise if anything in INSNS is not an INSN or if
3093 there is a libcall block inside INSNS.
3095 The final insn emitted is returned. */
3098 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3100 rtx prev, next, first, last, insn;
3102 if (!REG_P (target) || reload_in_progress)
3103 return emit_insn (insns);
3104 else
3105 for (insn = insns; insn; insn = NEXT_INSN (insn))
3106 if (!NONJUMP_INSN_P (insn)
3107 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3108 return emit_insn (insns);
3110 /* First emit all insns that do not store into words of the output and remove
3111 these from the list. */
3112 for (insn = insns; insn; insn = next)
3114 rtx note;
3115 struct no_conflict_data data;
3117 next = NEXT_INSN (insn);
3119 /* Some ports (cris) create libcall regions on their own. We must
3120 avoid any potential nesting of LIBCALLs. */
3121 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3122 remove_note (insn, note);
3123 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3124 remove_note (insn, note);
3126 data.target = target;
3127 data.first = insns;
3128 data.insn = insn;
3129 data.must_stay = 0;
3130 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3131 if (! data.must_stay)
3133 if (PREV_INSN (insn))
3134 NEXT_INSN (PREV_INSN (insn)) = next;
3135 else
3136 insns = next;
3138 if (next)
3139 PREV_INSN (next) = PREV_INSN (insn);
3141 add_insn (insn);
3145 prev = get_last_insn ();
3147 /* Now write the CLOBBER of the output, followed by the setting of each
3148 of the words, followed by the final copy. */
3149 if (target != op0 && target != op1)
3150 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3152 for (insn = insns; insn; insn = next)
3154 next = NEXT_INSN (insn);
3155 add_insn (insn);
3157 if (op1 && REG_P (op1))
3158 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3159 REG_NOTES (insn));
3161 if (op0 && REG_P (op0))
3162 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3163 REG_NOTES (insn));
3166 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3167 != CODE_FOR_nothing)
3169 last = emit_move_insn (target, target);
3170 if (equiv)
3171 set_unique_reg_note (last, REG_EQUAL, equiv);
3173 else
3175 last = get_last_insn ();
3177 /* Remove any existing REG_EQUAL note from "last", or else it will
3178 be mistaken for a note referring to the full contents of the
3179 alleged libcall value when found together with the REG_RETVAL
3180 note added below. An existing note can come from an insn
3181 expansion at "last". */
3182 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3185 if (prev == 0)
3186 first = get_insns ();
3187 else
3188 first = NEXT_INSN (prev);
3190 /* Encapsulate the block so it gets manipulated as a unit. */
3191 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3192 REG_NOTES (first));
3193 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
3195 return last;
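/* Illustrative usage sketch, not part of the original file, condensed from
   the multi-word callers above such as expand_unop's word-at-a-time case
   and expand_absneg_bit: the per-word insns are built inside a sequence and
   the whole sequence, the multi-word target and inputs, and a REG_EQUAL
   expression are then handed to emit_no_conflict_block.  The parameters are
   assumed to be set up as in those callers.  */

static rtx
sketch_no_conflict_usage (enum machine_mode mode, optab unoptab,
			  rtx op0, rtx target, int unsignedp)
{
  rtx insns;
  int i;

  start_sequence ();
  for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
    {
      rtx target_piece = operand_subword (target, i, 1, mode);
      rtx x = expand_unop (word_mode, unoptab,
			   operand_subword_force (op0, i, mode),
			   target_piece, unsignedp);
      if (target_piece != x)
	emit_move_insn (target_piece, x);
    }
  insns = get_insns ();
  end_sequence ();

  return emit_no_conflict_block (insns, target, op0, NULL_RTX,
				 gen_rtx_fmt_e (unoptab->code, mode,
						copy_rtx (op0)));
}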
3198 /* Emit code to make a call to a constant function or a library call.
3200 INSNS is a list containing all insns emitted in the call.
3201 These insns leave the result in RESULT. Our job is to copy RESULT
3202 to TARGET, which is logically equivalent to EQUIV.
3204 We first emit any insns that set a pseudo on the assumption that these are
3205 loading constants into registers; doing so allows them to be safely cse'ed
3206 between blocks. Then we emit all the other insns in the block, followed by
3207 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3208 note with an operand of EQUIV.
3210 Moving assignments to pseudos outside of the block is done to improve
3211 the generated code, but is not required to generate correct code,
3212 hence being unable to move an assignment is not grounds for not making
3213 a libcall block. There are two reasons why it is safe to leave these
3214 insns inside the block: First, we know that these pseudos cannot be
3215 used in generated RTL outside the block since they are created for
3216 temporary purposes within the block. Second, CSE will not record the
3217 values of anything set inside a libcall block, so we know they must
3218 be dead at the end of the block.
3220 Except for the first group of insns (the ones setting pseudos), the
3221 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3223 void
3224 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3226 rtx final_dest = target;
3227 rtx prev, next, first, last, insn;
3229 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3230 into a MEM later. Protect the libcall block from this change. */
3231 if (! REG_P (target) || REG_USERVAR_P (target))
3232 target = gen_reg_rtx (GET_MODE (target));
3234 /* If we're using non-call exceptions, a libcall corresponding to an
3235 operation that may trap may also trap. */
3236 if (flag_non_call_exceptions && may_trap_p (equiv))
3238 for (insn = insns; insn; insn = NEXT_INSN (insn))
3239 if (CALL_P (insn))
3241 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3243 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3244 remove_note (insn, note);
3247 else
3248 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3249 reg note to indicate that this call cannot throw or execute a nonlocal
3250 goto (unless there is already a REG_EH_REGION note, in which case
3251 we update it). */
3252 for (insn = insns; insn; insn = NEXT_INSN (insn))
3253 if (CALL_P (insn))
3255 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3257 if (note != 0)
3258 XEXP (note, 0) = constm1_rtx;
3259 else
3260 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3261 REG_NOTES (insn));
3264 /* First emit all insns that set pseudos. Remove them from the list as
3265 we go. Avoid insns that set pseudos which were referenced in previous
3266 insns. These can be generated by move_by_pieces, for example,
3267 to update an address. Similarly, avoid insns that reference things
3268 set in previous insns. */
3270 for (insn = insns; insn; insn = next)
3272 rtx set = single_set (insn);
3273 rtx note;
3275 /* Some ports (cris) create libcall regions on their own. We must
3276 avoid any potential nesting of LIBCALLs. */
3277 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3278 remove_note (insn, note);
3279 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3280 remove_note (insn, note);
3282 next = NEXT_INSN (insn);
3284 if (set != 0 && REG_P (SET_DEST (set))
3285 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3287 struct no_conflict_data data;
3289 data.target = const0_rtx;
3290 data.first = insns;
3291 data.insn = insn;
3292 data.must_stay = 0;
3293 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3294 if (! data.must_stay)
3296 if (PREV_INSN (insn))
3297 NEXT_INSN (PREV_INSN (insn)) = next;
3298 else
3299 insns = next;
3301 if (next)
3302 PREV_INSN (next) = PREV_INSN (insn);
3304 add_insn (insn);
3308 /* Some ports use a loop to copy large arguments onto the stack.
3309 Don't move anything outside such a loop. */
3310 if (LABEL_P (insn))
3311 break;
3314 prev = get_last_insn ();
3316 /* Write the remaining insns followed by the final copy. */
3318 for (insn = insns; insn; insn = next)
3320 next = NEXT_INSN (insn);
3322 add_insn (insn);
3325 last = emit_move_insn (target, result);
3326 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3327 != CODE_FOR_nothing)
3328 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3329 else
3331 /* Remove any existing REG_EQUAL note from "last", or else it will
3332 be mistaken for a note referring to the full contents of the
3333 libcall value when found together with the REG_RETVAL note added
3334 below. An existing note can come from an insn expansion at
3335 "last". */
3336 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3339 if (final_dest != target)
3340 emit_move_insn (final_dest, target);
3342 if (prev == 0)
3343 first = get_insns ();
3344 else
3345 first = NEXT_INSN (prev);
3347 /* Encapsulate the block so it gets manipulated as a unit. */
3348 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3350 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3351 when the encapsulated region would not be in one basic block,
3352 i.e. when there is a control_flow_insn_p insn between FIRST and LAST. */
3354 bool attach_libcall_retval_notes = true;
3355 next = NEXT_INSN (last);
3356 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3357 if (control_flow_insn_p (insn))
3359 attach_libcall_retval_notes = false;
3360 break;
3363 if (attach_libcall_retval_notes)
3365 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3366 REG_NOTES (first));
3367 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3368 REG_NOTES (last));
3373 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3374 PURPOSE describes how this comparison will be used. CODE is the rtx
3375 comparison code we will be using.
3377 ??? Actually, CODE is slightly weaker than that. A target is still
3378 required to implement all of the normal bcc operations, but not
3379 required to implement all (or any) of the unordered bcc operations. */
3382 can_compare_p (enum rtx_code code, enum machine_mode mode,
3383 enum can_compare_purpose purpose)
3387 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3389 if (purpose == ccp_jump)
3390 return bcc_gen_fctn[(int) code] != NULL;
3391 else if (purpose == ccp_store_flag)
3392 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3393 else
3394 /* There's only one cmov entry point, and it's allowed to fail. */
3395 return 1;
3397 if (purpose == ccp_jump
3398 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3399 return 1;
3400 if (purpose == ccp_cmov
3401 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3402 return 1;
3403 if (purpose == ccp_store_flag
3404 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3405 return 1;
3406 mode = GET_MODE_WIDER_MODE (mode);
3408 while (mode != VOIDmode);
3410 return 0;
3413 /* This function is called when we are going to emit a compare instruction that
3414 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3416 *PMODE is the mode of the inputs (in case they are const_int).
3417 *PUNSIGNEDP nonzero says that the operands are unsigned;
3418 this matters if they need to be widened.
3420 If they have mode BLKmode, then SIZE specifies the size of both operands.
3422 This function performs all the setup necessary so that the caller only has
3423 to emit a single comparison insn. This setup can involve doing a BLKmode
3424 comparison or emitting a library call to perform the comparison if no insn
3425 is available to handle it.
3426 The values which are passed in through pointers can be modified; the caller
3427 should perform the comparison on the modified values. Constant
3428 comparisons must have already been folded. */
3430 static void
3431 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3432 enum machine_mode *pmode, int *punsignedp,
3433 enum can_compare_purpose purpose)
3435 enum machine_mode mode = *pmode;
3436 rtx x = *px, y = *py;
3437 int unsignedp = *punsignedp;
3439 /* If we are inside an appropriately-short loop and we are optimizing,
3440 force expensive constants into a register. */
3441 if (CONSTANT_P (x) && optimize
3442 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3443 x = force_reg (mode, x);
3445 if (CONSTANT_P (y) && optimize
3446 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3447 y = force_reg (mode, y);
3449 #ifdef HAVE_cc0
3450 /* Make sure we have a canonical comparison. The RTL
3451 documentation states that canonical comparisons are required only
3452 for targets which have cc0. */
3453 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3454 #endif
3456 /* Don't let both operands fail to indicate the mode. */
3457 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3458 x = force_reg (mode, x);
3460 /* Handle all BLKmode compares. */
3462 if (mode == BLKmode)
3464 enum machine_mode cmp_mode, result_mode;
3465 enum insn_code cmp_code;
3466 tree length_type;
3467 rtx libfunc;
3468 rtx result;
3469 rtx opalign
3470 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3472 gcc_assert (size);
3474 /* Try to use a memory block compare insn - either cmpstr
3475 or cmpmem will do. */
3476 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3477 cmp_mode != VOIDmode;
3478 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3480 cmp_code = cmpmem_optab[cmp_mode];
3481 if (cmp_code == CODE_FOR_nothing)
3482 cmp_code = cmpstr_optab[cmp_mode];
3483 if (cmp_code == CODE_FOR_nothing)
3484 cmp_code = cmpstrn_optab[cmp_mode];
3485 if (cmp_code == CODE_FOR_nothing)
3486 continue;
3488 /* Must make sure the size fits the insn's mode. */
3489 if ((GET_CODE (size) == CONST_INT
3490 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3491 || (GET_MODE_BITSIZE (GET_MODE (size))
3492 > GET_MODE_BITSIZE (cmp_mode)))
3493 continue;
3495 result_mode = insn_data[cmp_code].operand[0].mode;
3496 result = gen_reg_rtx (result_mode);
3497 size = convert_to_mode (cmp_mode, size, 1);
3498 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3500 *px = result;
3501 *py = const0_rtx;
3502 *pmode = result_mode;
3503 return;
3506 /* Otherwise call a library function, memcmp. */
3507 libfunc = memcmp_libfunc;
3508 length_type = sizetype;
3509 result_mode = TYPE_MODE (integer_type_node);
3510 cmp_mode = TYPE_MODE (length_type);
3511 size = convert_to_mode (TYPE_MODE (length_type), size,
3512 TYPE_UNSIGNED (length_type));
3514 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3515 result_mode, 3,
3516 XEXP (x, 0), Pmode,
3517 XEXP (y, 0), Pmode,
3518 size, cmp_mode);
3519 *px = result;
3520 *py = const0_rtx;
3521 *pmode = result_mode;
3522 return;
3525 /* Don't allow operands to the compare to trap, as that can put the
3526 compare and branch in different basic blocks. */
3527 if (flag_non_call_exceptions)
3529 if (may_trap_p (x))
3530 x = force_reg (mode, x);
3531 if (may_trap_p (y))
3532 y = force_reg (mode, y);
3535 *px = x;
3536 *py = y;
3537 if (can_compare_p (*pcomparison, mode, purpose))
3538 return;
3540 /* Handle a lib call just for the mode we are using. */
3542 if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
3544 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3545 rtx result;
3547 /* If we want unsigned, and this mode has a distinct unsigned
3548 comparison routine, use that. */
3549 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3550 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3552 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3553 word_mode, 2, x, mode, y, mode);
3555 *px = result;
3556 *pmode = word_mode;
3557 if (TARGET_LIB_INT_CMP_BIASED)
3558 /* Integer comparison returns a result that must be compared
3559 against 1, so that even if we do an unsigned compare
3560 afterward, there is still a value that can represent the
3561 result "less than". */
3562 *py = const1_rtx;
3563 else
3565 *py = const0_rtx;
3566 *punsignedp = 1;
3568 return;
3571 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3572 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
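/* Minimal sketch, not part of the original file, of the "biased" libcall
   convention handled above when TARGET_LIB_INT_CMP_BIASED: the comparison
   routine is assumed to return 0, 1 or 2 for "less than", "equal" and
   "greater than", so the caller compares the (unsigned) result against 1
   rather than 0 and every outcome, including "less than", stays
   representable after an unsigned compare.  R here stands for the value
   returned by whatever comparison routine the target registered.  */

static int
sketch_biased_lt (unsigned int r)
{
  return r < 1;		/* x < y */
}

static int
sketch_biased_eq (unsigned int r)
{
  return r == 1;	/* x == y */
}

static int
sketch_biased_gt (unsigned int r)
{
  return r > 1;		/* x > y */
}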
3575 /* Before emitting an insn with code ICODE, make sure that X, which is going
3576 to be used for operand OPNUM of the insn, is converted from mode MODE to
3577 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3578 that it is accepted by the operand predicate. Return the new value. */
3580 static rtx
3581 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3582 enum machine_mode wider_mode, int unsignedp)
3584 if (mode != wider_mode)
3585 x = convert_modes (wider_mode, mode, x, unsignedp);
3587 if (!insn_data[icode].operand[opnum].predicate
3588 (x, insn_data[icode].operand[opnum].mode))
3590 if (no_new_pseudos)
3591 return NULL_RTX;
3592 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3595 return x;
3598 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3599 we can do the comparison.
3600 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3601 be NULL_RTX which indicates that only a comparison is to be generated. */
3603 static void
3604 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3605 enum rtx_code comparison, int unsignedp, rtx label)
3607 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3608 enum mode_class class = GET_MODE_CLASS (mode);
3609 enum machine_mode wider_mode = mode;
3611 /* Try combined insns first. */
3614 enum insn_code icode;
3615 PUT_MODE (test, wider_mode);
3617 if (label)
3619 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3621 if (icode != CODE_FOR_nothing
3622 && insn_data[icode].operand[0].predicate (test, wider_mode))
3624 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3625 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3626 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3627 return;
3631 /* Handle some compares against zero. */
3632 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3633 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3635 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3636 emit_insn (GEN_FCN (icode) (x));
3637 if (label)
3638 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3639 return;
3642 /* Handle compares for which there is a directly suitable insn. */
3644 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3645 if (icode != CODE_FOR_nothing)
3647 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3648 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3649 emit_insn (GEN_FCN (icode) (x, y));
3650 if (label)
3651 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3652 return;
3655 if (!CLASS_HAS_WIDER_MODES_P (class))
3656 break;
3658 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3660 while (wider_mode != VOIDmode);
3662 gcc_unreachable ();
3665 /* Generate code to compare X with Y so that the condition codes are
3666 set and to jump to LABEL if the condition is true. If X is a
3667 constant and Y is not a constant, then the comparison is swapped to
3668 ensure that the comparison RTL has the canonical form.
3670 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3671 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3672 the proper branch condition code.
3674 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3676 MODE is the mode of the inputs (in case they are const_int).
3678 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3679 be passed unchanged to emit_cmp_insn, then potentially converted into an
3680 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
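/* For illustration only (a hypothetical caller, not taken from this file):

     emit_cmp_and_jump_insns (op0, const0_rtx, EQ, NULL_RTX,
                              SImode, 0, label);

   compares OP0 against zero in SImode and emits a jump to LABEL that is
   taken when OP0 == 0.  */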
3682 void
3683 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3684 enum machine_mode mode, int unsignedp, rtx label)
3686 rtx op0 = x, op1 = y;
3688 /* Swap operands and condition to ensure canonical RTL. */
3689 if (swap_commutative_operands_p (x, y))
3691 /* If we're not emitting a branch, this means some caller
3692 is out of sync. */
3693 gcc_assert (label);
3695 op0 = y, op1 = x;
3696 comparison = swap_condition (comparison);
3699 #ifdef HAVE_cc0
3700 /* If OP0 is still a constant, then both X and Y must be constants.
3701 Force X into a register to create canonical RTL. */
3702 if (CONSTANT_P (op0))
3703 op0 = force_reg (mode, op0);
3704 #endif
3706 if (unsignedp)
3707 comparison = unsigned_condition (comparison);
3709 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3710 ccp_jump);
3711 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3714 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3716 void
3717 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3718 enum machine_mode mode, int unsignedp)
3720 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3723 /* Emit a library call comparison between floating point X and Y.
3724 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3726 static void
3727 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3728 enum machine_mode *pmode, int *punsignedp)
3730 enum rtx_code comparison = *pcomparison;
3731 enum rtx_code swapped = swap_condition (comparison);
3732 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3733 rtx x = *px;
3734 rtx y = *py;
3735 enum machine_mode orig_mode = GET_MODE (x);
3736 enum machine_mode mode;
3737 rtx value, target, insns, equiv;
3738 rtx libfunc = 0;
3739 bool reversed_p = false;
3741 for (mode = orig_mode;
3742 mode != VOIDmode;
3743 mode = GET_MODE_WIDER_MODE (mode))
3745 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3746 break;
3748 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3750 rtx tmp;
3751 tmp = x; x = y; y = tmp;
3752 comparison = swapped;
3753 break;
3756 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3757 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3759 comparison = reversed;
3760 reversed_p = true;
3761 break;
3765 gcc_assert (mode != VOIDmode);
3767 if (mode != orig_mode)
3769 x = convert_to_mode (mode, x, 0);
3770 y = convert_to_mode (mode, y, 0);
3773 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3774    the RTL.  This allows the RTL optimizers to delete the libcall if the
3775 condition can be determined at compile-time. */
3776 if (comparison == UNORDERED)
3778 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3779 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3780 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3781 temp, const_true_rtx, equiv);
3783 else
3785 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
3786 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3788 rtx true_rtx, false_rtx;
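          /* TRUE_RTX is the value the comparison libcall is described as
             returning when COMPARISON holds, FALSE_RTX the value otherwise;
             e.g. the EQ libcall is recorded as returning zero when its
             operands compare equal, and the LT libcall as returning -1
             when X < Y.  */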
3790 switch (comparison)
3792 case EQ:
3793 true_rtx = const0_rtx;
3794 false_rtx = const_true_rtx;
3795 break;
3797 case NE:
3798 true_rtx = const_true_rtx;
3799 false_rtx = const0_rtx;
3800 break;
3802 case GT:
3803 true_rtx = const1_rtx;
3804 false_rtx = const0_rtx;
3805 break;
3807 case GE:
3808 true_rtx = const0_rtx;
3809 false_rtx = constm1_rtx;
3810 break;
3812 case LT:
3813 true_rtx = constm1_rtx;
3814 false_rtx = const0_rtx;
3815 break;
3817 case LE:
3818 true_rtx = const0_rtx;
3819 false_rtx = const1_rtx;
3820 break;
3822 default:
3823 gcc_unreachable ();
3825 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3826 equiv, true_rtx, false_rtx);
3830 start_sequence ();
3831 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3832 word_mode, 2, x, mode, y, mode);
3833 insns = get_insns ();
3834 end_sequence ();
3836 target = gen_reg_rtx (word_mode);
3837 emit_libcall_block (insns, target, value, equiv);
3839 if (comparison == UNORDERED
3840 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3841 comparison = reversed_p ? EQ : NE;
3843 *px = target;
3844 *py = const0_rtx;
3845 *pmode = word_mode;
3846 *pcomparison = comparison;
3847 *punsignedp = 0;
3850 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3852 void
3853 emit_indirect_jump (rtx loc)
3855 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
3856 (loc, Pmode))
3857 loc = copy_to_mode_reg (Pmode, loc);
3859 emit_jump_insn (gen_indirect_jump (loc));
3860 emit_barrier ();
3863 #ifdef HAVE_conditional_move
3865 /* Emit a conditional move instruction if the machine supports one for that
3866 condition and machine mode.
3868 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3869 the mode to use should they be constants. If it is VOIDmode, they cannot
3870 both be constants.
3872 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3873 should be stored there. MODE is the mode to use should they be constants.
3874 If it is VOIDmode, they cannot both be constants.
3876 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3877 is not supported. */
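/* For illustration only (a hypothetical caller, not taken from this file):

     emit_conditional_move (target, GT, a, b, SImode, a, b, SImode, 0);

   requests TARGET = (A > B) ? A : B, i.e. a signed maximum, and yields
   NULL_RTX if the machine has no SImode conditional move.  */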
3880 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
3881 enum machine_mode cmode, rtx op2, rtx op3,
3882 enum machine_mode mode, int unsignedp)
3884 rtx tem, subtarget, comparison, insn;
3885 enum insn_code icode;
3886 enum rtx_code reversed;
3888 /* If one operand is constant, make it the second one. Only do this
3889 if the other operand is not constant as well. */
3891 if (swap_commutative_operands_p (op0, op1))
3893 tem = op0;
3894 op0 = op1;
3895 op1 = tem;
3896 code = swap_condition (code);
3899 /* get_condition will prefer to generate LT and GT even if the old
3900 comparison was against zero, so undo that canonicalization here since
3901 comparisons against zero are cheaper. */
3902 if (code == LT && op1 == const1_rtx)
3903 code = LE, op1 = const0_rtx;
3904 else if (code == GT && op1 == constm1_rtx)
3905 code = GE, op1 = const0_rtx;
3907 if (cmode == VOIDmode)
3908 cmode = GET_MODE (op0);
3910 if (swap_commutative_operands_p (op2, op3)
3911 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
3912 != UNKNOWN))
3914 tem = op2;
3915 op2 = op3;
3916 op3 = tem;
3917 code = reversed;
3920 if (mode == VOIDmode)
3921 mode = GET_MODE (op2);
3923 icode = movcc_gen_code[mode];
3925 if (icode == CODE_FOR_nothing)
3926 return 0;
3928 if (!target)
3929 target = gen_reg_rtx (mode);
3931 subtarget = target;
3933 /* If the insn doesn't accept these operands, put them in pseudos. */
3935 if (!insn_data[icode].operand[0].predicate
3936 (subtarget, insn_data[icode].operand[0].mode))
3937 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
3939 if (!insn_data[icode].operand[2].predicate
3940 (op2, insn_data[icode].operand[2].mode))
3941 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
3943 if (!insn_data[icode].operand[3].predicate
3944 (op3, insn_data[icode].operand[3].mode))
3945 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
3947   /* Everything should now be in suitable form, so emit the compare insn
3948 and then the conditional move. */
3950 comparison
3951 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
3953 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3954 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3955 return NULL and let the caller figure out how best to deal with this
3956 situation. */
3957 if (GET_CODE (comparison) != code)
3958 return NULL_RTX;
3960 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
3962 /* If that failed, then give up. */
3963 if (insn == 0)
3964 return 0;
3966 emit_insn (insn);
3968 if (subtarget != target)
3969 convert_move (target, subtarget, 0);
3971 return target;
3974 /* Return nonzero if a conditional move of mode MODE is supported.
3976 This function is for combine so it can tell whether an insn that looks
3977 like a conditional move is actually supported by the hardware. If we
3978 guess wrong we lose a bit on optimization, but that's it. */
3979 /* ??? sparc64 supports conditionally moving integer values based on fp
3980 comparisons, and vice versa. How do we handle them? */
3983 can_conditionally_move_p (enum machine_mode mode)
3985 if (movcc_gen_code[mode] != CODE_FOR_nothing)
3986 return 1;
3988 return 0;
3991 #endif /* HAVE_conditional_move */
3993 /* Emit a conditional addition instruction if the machine supports one for that
3994 condition and machine mode.
3996 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3997 the mode to use should they be constants. If it is VOIDmode, they cannot
3998 both be constants.
4000 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4001 should be stored there. MODE is the mode to use should they be constants.
4002 If it is VOIDmode, they cannot both be constants.
4004 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4005 is not supported. */
4008 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4009 enum machine_mode cmode, rtx op2, rtx op3,
4010 enum machine_mode mode, int unsignedp)
4012 rtx tem, subtarget, comparison, insn;
4013 enum insn_code icode;
4014 enum rtx_code reversed;
4016 /* If one operand is constant, make it the second one. Only do this
4017 if the other operand is not constant as well. */
4019 if (swap_commutative_operands_p (op0, op1))
4021 tem = op0;
4022 op0 = op1;
4023 op1 = tem;
4024 code = swap_condition (code);
4027 /* get_condition will prefer to generate LT and GT even if the old
4028 comparison was against zero, so undo that canonicalization here since
4029 comparisons against zero are cheaper. */
4030 if (code == LT && op1 == const1_rtx)
4031 code = LE, op1 = const0_rtx;
4032 else if (code == GT && op1 == constm1_rtx)
4033 code = GE, op1 = const0_rtx;
4035 if (cmode == VOIDmode)
4036 cmode = GET_MODE (op0);
4038 if (swap_commutative_operands_p (op2, op3)
4039 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4040 != UNKNOWN))
4042 tem = op2;
4043 op2 = op3;
4044 op3 = tem;
4045 code = reversed;
4048 if (mode == VOIDmode)
4049 mode = GET_MODE (op2);
4051 icode = addcc_optab->handlers[(int) mode].insn_code;
4053 if (icode == CODE_FOR_nothing)
4054 return 0;
4056 if (!target)
4057 target = gen_reg_rtx (mode);
4059 /* If the insn doesn't accept these operands, put them in pseudos. */
4061 if (!insn_data[icode].operand[0].predicate
4062 (target, insn_data[icode].operand[0].mode))
4063 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4064 else
4065 subtarget = target;
4067 if (!insn_data[icode].operand[2].predicate
4068 (op2, insn_data[icode].operand[2].mode))
4069 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4071 if (!insn_data[icode].operand[3].predicate
4072 (op3, insn_data[icode].operand[3].mode))
4073 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4075   /* Everything should now be in suitable form, so emit the compare insn
4076 and then the conditional move. */
4078 comparison
4079 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4081 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4082 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4083 return NULL and let the caller figure out how best to deal with this
4084 situation. */
4085 if (GET_CODE (comparison) != code)
4086 return NULL_RTX;
4088 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4090 /* If that failed, then give up. */
4091 if (insn == 0)
4092 return 0;
4094 emit_insn (insn);
4096 if (subtarget != target)
4097 convert_move (target, subtarget, 0);
4099 return target;
4102 /* These functions attempt to generate an insn body, rather than
4103 emitting the insn, but if the gen function already emits them, we
4104 make no attempt to turn them back into naked patterns. */
4106 /* Generate and return an insn body to add Y to X. */
4109 gen_add2_insn (rtx x, rtx y)
4111 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4113 gcc_assert (insn_data[icode].operand[0].predicate
4114 (x, insn_data[icode].operand[0].mode));
4115 gcc_assert (insn_data[icode].operand[1].predicate
4116 (x, insn_data[icode].operand[1].mode));
4117 gcc_assert (insn_data[icode].operand[2].predicate
4118 (y, insn_data[icode].operand[2].mode));
4120 return GEN_FCN (icode) (x, x, y);
4123 /* Generate and return an insn body to add r1 and c,
4124 storing the result in r0. */
4126 gen_add3_insn (rtx r0, rtx r1, rtx c)
4128 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4130 if (icode == CODE_FOR_nothing
4131 || !(insn_data[icode].operand[0].predicate
4132 (r0, insn_data[icode].operand[0].mode))
4133 || !(insn_data[icode].operand[1].predicate
4134 (r1, insn_data[icode].operand[1].mode))
4135 || !(insn_data[icode].operand[2].predicate
4136 (c, insn_data[icode].operand[2].mode)))
4137 return NULL_RTX;
4139 return GEN_FCN (icode) (r0, r1, c);
4143 have_add2_insn (rtx x, rtx y)
4145 int icode;
4147 gcc_assert (GET_MODE (x) != VOIDmode);
4149 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4151 if (icode == CODE_FOR_nothing)
4152 return 0;
4154 if (!(insn_data[icode].operand[0].predicate
4155 (x, insn_data[icode].operand[0].mode))
4156 || !(insn_data[icode].operand[1].predicate
4157 (x, insn_data[icode].operand[1].mode))
4158 || !(insn_data[icode].operand[2].predicate
4159 (y, insn_data[icode].operand[2].mode)))
4160 return 0;
4162 return 1;
4165 /* Generate and return an insn body to subtract Y from X. */
4168 gen_sub2_insn (rtx x, rtx y)
4170 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4172 gcc_assert (insn_data[icode].operand[0].predicate
4173 (x, insn_data[icode].operand[0].mode));
4174 gcc_assert (insn_data[icode].operand[1].predicate
4175 (x, insn_data[icode].operand[1].mode));
4176 gcc_assert (insn_data[icode].operand[2].predicate
4177 (y, insn_data[icode].operand[2].mode));
4179 return GEN_FCN (icode) (x, x, y);
4182 /* Generate and return an insn body to subtract r1 and c,
4183 storing the result in r0. */
4185 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4187 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4189 if (icode == CODE_FOR_nothing
4190 || !(insn_data[icode].operand[0].predicate
4191 (r0, insn_data[icode].operand[0].mode))
4192 || !(insn_data[icode].operand[1].predicate
4193 (r1, insn_data[icode].operand[1].mode))
4194 || !(insn_data[icode].operand[2].predicate
4195 (c, insn_data[icode].operand[2].mode)))
4196 return NULL_RTX;
4198 return GEN_FCN (icode) (r0, r1, c);
4202 have_sub2_insn (rtx x, rtx y)
4204 int icode;
4206 gcc_assert (GET_MODE (x) != VOIDmode);
4208 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4210 if (icode == CODE_FOR_nothing)
4211 return 0;
4213 if (!(insn_data[icode].operand[0].predicate
4214 (x, insn_data[icode].operand[0].mode))
4215 || !(insn_data[icode].operand[1].predicate
4216 (x, insn_data[icode].operand[1].mode))
4217 || !(insn_data[icode].operand[2].predicate
4218 (y, insn_data[icode].operand[2].mode)))
4219 return 0;
4221 return 1;
4224 /* Generate the body of an instruction to copy Y into X.
4225 It may be a list of insns, if one insn isn't enough. */
4228 gen_move_insn (rtx x, rtx y)
4230 rtx seq;
4232 start_sequence ();
4233 emit_move_insn_1 (x, y);
4234 seq = get_insns ();
4235 end_sequence ();
4236 return seq;
4239 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4240 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4241 no such operation exists, CODE_FOR_nothing will be returned. */
4243 enum insn_code
4244 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4245 int unsignedp)
4247 convert_optab tab;
4248 #ifdef HAVE_ptr_extend
4249 if (unsignedp < 0)
4250 return CODE_FOR_ptr_extend;
4251 #endif
4253 tab = unsignedp ? zext_optab : sext_optab;
4254 return tab->handlers[to_mode][from_mode].insn_code;
4257 /* Generate the body of an insn to extend Y (with mode MFROM)
4258 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4261 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4262 enum machine_mode mfrom, int unsignedp)
4264 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4265 return GEN_FCN (icode) (x, y);
4268 /* can_fix_p and can_float_p say whether the target machine
4269 can directly convert a given fixed point type to
4270 a given floating point type, or vice versa.
4271 The returned value is the CODE_FOR_... value to use,
4272 or CODE_FOR_nothing if these modes cannot be directly converted.
4274 *TRUNCP_PTR is set to 1 if it is necessary to output
4275 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4277 static enum insn_code
4278 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4279 int unsignedp, int *truncp_ptr)
4281 convert_optab tab;
4282 enum insn_code icode;
4284 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4285 icode = tab->handlers[fixmode][fltmode].insn_code;
4286 if (icode != CODE_FOR_nothing)
4288 *truncp_ptr = 0;
4289 return icode;
4292   /* FIXME: This requires a port to define both FIX and FTRUNC patterns
4293 for this to work. We need to rework the fix* and ftrunc* patterns
4294 and documentation. */
4295 tab = unsignedp ? ufix_optab : sfix_optab;
4296 icode = tab->handlers[fixmode][fltmode].insn_code;
4297 if (icode != CODE_FOR_nothing
4298 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4300 *truncp_ptr = 1;
4301 return icode;
4304 *truncp_ptr = 0;
4305 return CODE_FOR_nothing;
4308 static enum insn_code
4309 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4310 int unsignedp)
4312 convert_optab tab;
4314 tab = unsignedp ? ufloat_optab : sfloat_optab;
4315 return tab->handlers[fltmode][fixmode].insn_code;
4318 /* Generate code to convert FROM to floating point
4319 and store in TO. FROM must be fixed point and not VOIDmode.
4320 UNSIGNEDP nonzero means regard FROM as unsigned.
4321 Normally this is done by correcting the final value
4322 if it is negative. */
4324 void
4325 expand_float (rtx to, rtx from, int unsignedp)
4327 enum insn_code icode;
4328 rtx target = to;
4329 enum machine_mode fmode, imode;
4330 bool can_do_signed = false;
4332 /* Crash now, because we won't be able to decide which mode to use. */
4333 gcc_assert (GET_MODE (from) != VOIDmode);
4335 /* Look for an insn to do the conversion. Do it in the specified
4336 modes if possible; otherwise convert either input, output or both to
4337 wider mode. If the integer mode is wider than the mode of FROM,
4338 we can do the conversion signed even if the input is unsigned. */
4340 for (fmode = GET_MODE (to); fmode != VOIDmode;
4341 fmode = GET_MODE_WIDER_MODE (fmode))
4342 for (imode = GET_MODE (from); imode != VOIDmode;
4343 imode = GET_MODE_WIDER_MODE (imode))
4345 int doing_unsigned = unsignedp;
4347 if (fmode != GET_MODE (to)
4348 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4349 continue;
4351 icode = can_float_p (fmode, imode, unsignedp);
4352 if (icode == CODE_FOR_nothing && unsignedp)
4354 enum insn_code scode = can_float_p (fmode, imode, 0);
4355 if (scode != CODE_FOR_nothing)
4356 can_do_signed = true;
4357 if (imode != GET_MODE (from))
4358 icode = scode, doing_unsigned = 0;
4361 if (icode != CODE_FOR_nothing)
4363 if (imode != GET_MODE (from))
4364 from = convert_to_mode (imode, from, unsignedp);
4366 if (fmode != GET_MODE (to))
4367 target = gen_reg_rtx (fmode);
4369 emit_unop_insn (icode, target, from,
4370 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4372 if (target != to)
4373 convert_move (to, target, 0);
4374 return;
4378 /* Unsigned integer, and no way to convert directly.
4379 Convert as signed, then conditionally adjust the result. */
4380 if (unsignedp && can_do_signed)
4382 rtx label = gen_label_rtx ();
4383 rtx temp;
4384 REAL_VALUE_TYPE offset;
4386 /* Look for a usable floating mode FMODE wider than the source and at
4387 least as wide as the target. Using FMODE will avoid rounding woes
4388 with unsigned values greater than the signed maximum value. */
4390 for (fmode = GET_MODE (to); fmode != VOIDmode;
4391 fmode = GET_MODE_WIDER_MODE (fmode))
4392 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4393 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4394 break;
4396 if (fmode == VOIDmode)
4398 /* There is no such mode. Pretend the target is wide enough. */
4399 fmode = GET_MODE (to);
4401 /* Avoid double-rounding when TO is narrower than FROM. */
4402 if ((significand_size (fmode) + 1)
4403 < GET_MODE_BITSIZE (GET_MODE (from)))
4405 rtx temp1;
4406 rtx neglabel = gen_label_rtx ();
4408 /* Don't use TARGET if it isn't a register, is a hard register,
4409 or is the wrong mode. */
4410 if (!REG_P (target)
4411 || REGNO (target) < FIRST_PSEUDO_REGISTER
4412 || GET_MODE (target) != fmode)
4413 target = gen_reg_rtx (fmode);
4415 imode = GET_MODE (from);
4416 do_pending_stack_adjust ();
4418 /* Test whether the sign bit is set. */
4419 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4420 0, neglabel);
4422 /* The sign bit is not set. Convert as signed. */
4423 expand_float (target, from, 0);
4424 emit_jump_insn (gen_jump (label));
4425 emit_barrier ();
4427 /* The sign bit is set.
4428 Convert to a usable (positive signed) value by shifting right
4429 one bit, while remembering if a nonzero bit was shifted
4430 out; i.e., compute (from & 1) | (from >> 1). */
4432 emit_label (neglabel);
4433 temp = expand_binop (imode, and_optab, from, const1_rtx,
4434 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4435 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4436 NULL_RTX, 1);
4437 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4438 OPTAB_LIB_WIDEN);
4439 expand_float (target, temp, 0);
4441 /* Multiply by 2 to undo the shift above. */
4442 temp = expand_binop (fmode, add_optab, target, target,
4443 target, 0, OPTAB_LIB_WIDEN);
4444 if (temp != target)
4445 emit_move_insn (target, temp);
4447 do_pending_stack_adjust ();
4448 emit_label (label);
4449 goto done;
4453 /* If we are about to do some arithmetic to correct for an
4454 unsigned operand, do it in a pseudo-register. */
4456 if (GET_MODE (to) != fmode
4457 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4458 target = gen_reg_rtx (fmode);
4460 /* Convert as signed integer to floating. */
4461 expand_float (target, from, 0);
4463 /* If FROM is negative (and therefore TO is negative),
4464 correct its value by 2**bitwidth. */
4466 do_pending_stack_adjust ();
4467 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4468 0, label);
4471 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4472 temp = expand_binop (fmode, add_optab, target,
4473 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4474 target, 0, OPTAB_LIB_WIDEN);
4475 if (temp != target)
4476 emit_move_insn (target, temp);
4478 do_pending_stack_adjust ();
4479 emit_label (label);
4480 goto done;
4483 /* No hardware instruction available; call a library routine. */
4485 rtx libfunc;
4486 rtx insns;
4487 rtx value;
4488 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4490 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4491 from = convert_to_mode (SImode, from, unsignedp);
4493 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4494 gcc_assert (libfunc);
4496 start_sequence ();
4498 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4499 GET_MODE (to), 1, from,
4500 GET_MODE (from));
4501 insns = get_insns ();
4502 end_sequence ();
4504 emit_libcall_block (insns, target, value,
4505 gen_rtx_FLOAT (GET_MODE (to), from));
4508 done:
4510 /* Copy result to requested destination
4511 if we have been computing in a temp location. */
4513 if (target != to)
4515 if (GET_MODE (target) == GET_MODE (to))
4516 emit_move_insn (to, target);
4517 else
4518 convert_move (to, target, 0);
4522 /* Generate code to convert FROM to fixed point and store in TO. FROM
4523 must be floating point. */
4525 void
4526 expand_fix (rtx to, rtx from, int unsignedp)
4528 enum insn_code icode;
4529 rtx target = to;
4530 enum machine_mode fmode, imode;
4531 int must_trunc = 0;
4533 /* We first try to find a pair of modes, one real and one integer, at
4534 least as wide as FROM and TO, respectively, in which we can open-code
4535 this conversion. If the integer mode is wider than the mode of TO,
4536 we can do the conversion either signed or unsigned. */
4538 for (fmode = GET_MODE (from); fmode != VOIDmode;
4539 fmode = GET_MODE_WIDER_MODE (fmode))
4540 for (imode = GET_MODE (to); imode != VOIDmode;
4541 imode = GET_MODE_WIDER_MODE (imode))
4543 int doing_unsigned = unsignedp;
4545 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4546 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4547 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4549 if (icode != CODE_FOR_nothing)
4551 if (fmode != GET_MODE (from))
4552 from = convert_to_mode (fmode, from, 0);
4554 if (must_trunc)
4556 rtx temp = gen_reg_rtx (GET_MODE (from));
4557 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4558 temp, 0);
4561 if (imode != GET_MODE (to))
4562 target = gen_reg_rtx (imode);
4564 emit_unop_insn (icode, target, from,
4565 doing_unsigned ? UNSIGNED_FIX : FIX);
4566 if (target != to)
4567 convert_move (to, target, unsignedp);
4568 return;
4572 /* For an unsigned conversion, there is one more way to do it.
4573 If we have a signed conversion, we generate code that compares
4574    the real value to the largest representable positive number.  If it
4575 is smaller, the conversion is done normally. Otherwise, subtract
4576 one plus the highest signed number, convert, and add it back.
4578 We only need to check all real modes, since we know we didn't find
4579 anything with a wider integer mode.
4581    This code used to extend the FP value into a mode wider than the
4582    destination.  This is not needed.  Consider, for instance, conversion
4583    from SFmode into DImode.
4585    The hot path through the code deals with inputs smaller than 2^63,
4586    doing just the conversion, so there are no bits to lose.
4588    On the other path we know the value is positive and in the range
4589    2^63..2^64-1 inclusive (for any other input, overflow happens and the
4590    result is undefined), so we know that the most significant bit set in
4591    the mantissa corresponds to 2^63.  The subtraction of 2^63 does not
4592    generate any rounding, as it simply clears that bit.  The rest is trivial.  */
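 /* Roughly, for an N-bit TO the code emitted below behaves like this
    pseudo-code (not part of the original comment):

       if (from < 2^(N-1))
         to = (signed fix) from;
       else
         to = ((signed fix) (from - 2^(N-1))) ^ (1 << (N-1));  */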
4594 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4595 for (fmode = GET_MODE (from); fmode != VOIDmode;
4596 fmode = GET_MODE_WIDER_MODE (fmode))
4597 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4598 &must_trunc))
4600 int bitsize;
4601 REAL_VALUE_TYPE offset;
4602 rtx limit, lab1, lab2, insn;
4604 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4605 real_2expN (&offset, bitsize - 1);
4606 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4607 lab1 = gen_label_rtx ();
4608 lab2 = gen_label_rtx ();
4610 if (fmode != GET_MODE (from))
4611 from = convert_to_mode (fmode, from, 0);
4613 /* See if we need to do the subtraction. */
4614 do_pending_stack_adjust ();
4615 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4616 0, lab1);
4618 /* If not, do the signed "fix" and branch around fixup code. */
4619 expand_fix (to, from, 0);
4620 emit_jump_insn (gen_jump (lab2));
4621 emit_barrier ();
4623 /* Otherwise, subtract 2**(N-1), convert to signed number,
4624 then add 2**(N-1). Do the addition using XOR since this
4625 will often generate better code. */
4626 emit_label (lab1);
4627 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4628 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4629 expand_fix (to, target, 0);
4630 target = expand_binop (GET_MODE (to), xor_optab, to,
4631 gen_int_mode
4632 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4633 GET_MODE (to)),
4634 to, 1, OPTAB_LIB_WIDEN);
4636 if (target != to)
4637 emit_move_insn (to, target);
4639 emit_label (lab2);
4641 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4642 != CODE_FOR_nothing)
4644 /* Make a place for a REG_NOTE and add it. */
4645 insn = emit_move_insn (to, to);
4646 set_unique_reg_note (insn,
4647 REG_EQUAL,
4648 gen_rtx_fmt_e (UNSIGNED_FIX,
4649 GET_MODE (to),
4650 copy_rtx (from)));
4653 return;
4656 /* We can't do it with an insn, so use a library call. But first ensure
4657 that the mode of TO is at least as wide as SImode, since those are the
4658 only library calls we know about. */
4660 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4662 target = gen_reg_rtx (SImode);
4664 expand_fix (target, from, unsignedp);
4666 else
4668 rtx insns;
4669 rtx value;
4670 rtx libfunc;
4672 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4673 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4674 gcc_assert (libfunc);
4676 start_sequence ();
4678 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4679 GET_MODE (to), 1, from,
4680 GET_MODE (from));
4681 insns = get_insns ();
4682 end_sequence ();
4684 emit_libcall_block (insns, target, value,
4685 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4686 GET_MODE (to), from));
4689 if (target != to)
4691 if (GET_MODE (to) == GET_MODE (target))
4692 emit_move_insn (to, target);
4693 else
4694 convert_move (to, target, 0);
4698 /* Report whether we have an instruction to perform the operation
4699 specified by CODE on operands of mode MODE. */
4701 have_insn_for (enum rtx_code code, enum machine_mode mode)
4703 return (code_to_optab[(int) code] != 0
4704 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4705 != CODE_FOR_nothing));
4708 /* Create a blank optab. */
4709 static optab
4710 new_optab (void)
4712 int i;
4713 optab op = ggc_alloc (sizeof (struct optab));
4714 for (i = 0; i < NUM_MACHINE_MODES; i++)
4716 op->handlers[i].insn_code = CODE_FOR_nothing;
4717 op->handlers[i].libfunc = 0;
4720 return op;
4723 static convert_optab
4724 new_convert_optab (void)
4726 int i, j;
4727 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4728 for (i = 0; i < NUM_MACHINE_MODES; i++)
4729 for (j = 0; j < NUM_MACHINE_MODES; j++)
4731 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4732 op->handlers[i][j].libfunc = 0;
4734 return op;
4737 /* Same, but fill in its code as CODE, and write it into the
4738 code_to_optab table. */
4739 static inline optab
4740 init_optab (enum rtx_code code)
4742 optab op = new_optab ();
4743 op->code = code;
4744 code_to_optab[(int) code] = op;
4745 return op;
4748 /* Same, but fill in its code as CODE, and do _not_ write it into
4749 the code_to_optab table. */
4750 static inline optab
4751 init_optabv (enum rtx_code code)
4753 optab op = new_optab ();
4754 op->code = code;
4755 return op;
4758 /* Conversion optabs never go in the code_to_optab table. */
4759 static inline convert_optab
4760 init_convert_optab (enum rtx_code code)
4762 convert_optab op = new_convert_optab ();
4763 op->code = code;
4764 return op;
4767 /* Initialize the libfunc fields of an entire group of entries in some
4768 optab. Each entry is set equal to a string consisting of a leading
4769 pair of underscores followed by a generic operation name followed by
4770 a mode name (downshifted to lowercase) followed by a single character
4771 representing the number of operands for the given operation (which is
4772 usually one of the characters '2', '3', or '4').
4774 OPTABLE is the table in which libfunc fields are to be initialized.
4775 FIRST_MODE is the first machine mode index in the given optab to
4776 initialize.
4777 LAST_MODE is the last machine mode index in the given optab to
4778 initialize.
4779 OPNAME is the generic (string) name of the operation.
4780 SUFFIX is the character which specifies the number of operands for
4781 the given generic operation.
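   For example, OPNAME "add" with mode SFmode and SUFFIX '3' produces the
   libfunc name "__addsf3".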
4784 static void
4785 init_libfuncs (optab optable, int first_mode, int last_mode,
4786 const char *opname, int suffix)
4788 int mode;
4789 unsigned opname_len = strlen (opname);
4791 for (mode = first_mode; (int) mode <= (int) last_mode;
4792 mode = (enum machine_mode) ((int) mode + 1))
4794 const char *mname = GET_MODE_NAME (mode);
4795 unsigned mname_len = strlen (mname);
4796 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
4797 char *p;
4798 const char *q;
4800 p = libfunc_name;
4801 *p++ = '_';
4802 *p++ = '_';
4803 for (q = opname; *q; )
4804 *p++ = *q++;
4805 for (q = mname; *q; q++)
4806 *p++ = TOLOWER (*q);
4807 *p++ = suffix;
4808 *p = '\0';
4810 optable->handlers[(int) mode].libfunc
4811 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
4815 /* Initialize the libfunc fields of an entire group of entries in some
4816 optab which correspond to all integer mode operations. The parameters
4817 have the same meaning as similarly named ones for the `init_libfuncs'
4818 routine. (See above). */
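/* For example, with a 32-bit word_mode the later call for sdiv_optab
   ("div", '3') produces "__divsi3" and "__divdi3".  */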
4820 static void
4821 init_integral_libfuncs (optab optable, const char *opname, int suffix)
4823 int maxsize = 2*BITS_PER_WORD;
4824 if (maxsize < LONG_LONG_TYPE_SIZE)
4825 maxsize = LONG_LONG_TYPE_SIZE;
4826 init_libfuncs (optable, word_mode,
4827 mode_for_size (maxsize, MODE_INT, 0),
4828 opname, suffix);
4831 /* Initialize the libfunc fields of an entire group of entries in some
4832 optab which correspond to all real mode operations. The parameters
4833 have the same meaning as similarly named ones for the `init_libfuncs'
4834 routine. (See above). */
4836 static void
4837 init_floating_libfuncs (optab optable, const char *opname, int suffix)
4839 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
4842 /* Initialize the libfunc fields of an entire group of entries of an
4843 inter-mode-class conversion optab. The string formation rules are
4844 similar to the ones for init_libfuncs, above, but instead of having
4845 a mode name and an operand count these functions have two mode names
4846 and no operand count. */
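/* For example, the later call for sfloat_optab ("float", MODE_INT,
   MODE_FLOAT) produces names such as "__floatsidf" for the SImode to
   DFmode conversion.  */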
4847 static void
4848 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
4849 enum mode_class from_class,
4850 enum mode_class to_class)
4852 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
4853 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
4854 size_t opname_len = strlen (opname);
4855 size_t max_mname_len = 0;
4857 enum machine_mode fmode, tmode;
4858 const char *fname, *tname;
4859 const char *q;
4860 char *libfunc_name, *suffix;
4861 char *p;
4863 for (fmode = first_from_mode;
4864 fmode != VOIDmode;
4865 fmode = GET_MODE_WIDER_MODE (fmode))
4866 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
4868 for (tmode = first_to_mode;
4869 tmode != VOIDmode;
4870 tmode = GET_MODE_WIDER_MODE (tmode))
4871 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
4873 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4874 libfunc_name[0] = '_';
4875 libfunc_name[1] = '_';
4876 memcpy (&libfunc_name[2], opname, opname_len);
4877 suffix = libfunc_name + opname_len + 2;
4879 for (fmode = first_from_mode; fmode != VOIDmode;
4880 fmode = GET_MODE_WIDER_MODE (fmode))
4881 for (tmode = first_to_mode; tmode != VOIDmode;
4882 tmode = GET_MODE_WIDER_MODE (tmode))
4884 fname = GET_MODE_NAME (fmode);
4885 tname = GET_MODE_NAME (tmode);
4887 p = suffix;
4888 for (q = fname; *q; p++, q++)
4889 *p = TOLOWER (*q);
4890 for (q = tname; *q; p++, q++)
4891 *p = TOLOWER (*q);
4893 *p = '\0';
4895 tab->handlers[tmode][fmode].libfunc
4896 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4897 p - libfunc_name));
4901 /* Initialize the libfunc fields of an entire group of entries of an
4902 intra-mode-class conversion optab. The string formation rules are
4903    similar to the ones for init_libfuncs, above.  WIDENING says whether
4904 the optab goes from narrow to wide modes or vice versa. These functions
4905 have two mode names _and_ an operand count. */
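/* For example, the later calls produce "__extendsfdf2" for sext_optab
   ("extend", widening) and "__truncdfsf2" for trunc_optab ("trunc",
   narrowing).  */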
4906 static void
4907 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
4908 enum mode_class class, bool widening)
4910 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
4911 size_t opname_len = strlen (opname);
4912 size_t max_mname_len = 0;
4914 enum machine_mode nmode, wmode;
4915 const char *nname, *wname;
4916 const char *q;
4917 char *libfunc_name, *suffix;
4918 char *p;
4920 for (nmode = first_mode; nmode != VOIDmode;
4921 nmode = GET_MODE_WIDER_MODE (nmode))
4922 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
4924 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4925 libfunc_name[0] = '_';
4926 libfunc_name[1] = '_';
4927 memcpy (&libfunc_name[2], opname, opname_len);
4928 suffix = libfunc_name + opname_len + 2;
4930 for (nmode = first_mode; nmode != VOIDmode;
4931 nmode = GET_MODE_WIDER_MODE (nmode))
4932 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
4933 wmode = GET_MODE_WIDER_MODE (wmode))
4935 nname = GET_MODE_NAME (nmode);
4936 wname = GET_MODE_NAME (wmode);
4938 p = suffix;
4939 for (q = widening ? nname : wname; *q; p++, q++)
4940 *p = TOLOWER (*q);
4941 for (q = widening ? wname : nname; *q; p++, q++)
4942 *p = TOLOWER (*q);
4944 *p++ = '2';
4945 *p = '\0';
4947 tab->handlers[widening ? wmode : nmode]
4948 [widening ? nmode : wmode].libfunc
4949 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4950 p - libfunc_name));
4956 init_one_libfunc (const char *name)
4958 rtx symbol;
4960 /* Create a FUNCTION_DECL that can be passed to
4961 targetm.encode_section_info. */
4962   /* ??? We don't have any type information except that this is
4963      a function.  Pretend this is "int foo()".  */
4964 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
4965 build_function_type (integer_type_node, NULL_TREE));
4966 DECL_ARTIFICIAL (decl) = 1;
4967 DECL_EXTERNAL (decl) = 1;
4968 TREE_PUBLIC (decl) = 1;
4970 symbol = XEXP (DECL_RTL (decl), 0);
4972 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4973 are the flags assigned by targetm.encode_section_info. */
4974 SYMBOL_REF_DECL (symbol) = 0;
4976 return symbol;
4979 /* Call this to reset the function entry for one optab (OPTABLE) in mode
4980 MODE to NAME, which should be either 0 or a string constant. */
4981 void
4982 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
4984 if (name)
4985 optable->handlers[mode].libfunc = init_one_libfunc (name);
4986 else
4987 optable->handlers[mode].libfunc = 0;
4990 /* Call this to reset the function entry for one conversion optab
4991 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
4992 either 0 or a string constant. */
4993 void
4994 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
4995 enum machine_mode fmode, const char *name)
4997 if (name)
4998 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
4999 else
5000 optable->handlers[tmode][fmode].libfunc = 0;
5003 /* Call this once to initialize the contents of the optabs
5004 appropriately for the current target machine. */
5006 void
5007 init_optabs (void)
5009 unsigned int i;
5011 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5013 for (i = 0; i < NUM_RTX_CODE; i++)
5014 setcc_gen_code[i] = CODE_FOR_nothing;
5016 #ifdef HAVE_conditional_move
5017 for (i = 0; i < NUM_MACHINE_MODES; i++)
5018 movcc_gen_code[i] = CODE_FOR_nothing;
5019 #endif
5021 for (i = 0; i < NUM_MACHINE_MODES; i++)
5023 vcond_gen_code[i] = CODE_FOR_nothing;
5024 vcondu_gen_code[i] = CODE_FOR_nothing;
5027 add_optab = init_optab (PLUS);
5028 addv_optab = init_optabv (PLUS);
5029 sub_optab = init_optab (MINUS);
5030 subv_optab = init_optabv (MINUS);
5031 smul_optab = init_optab (MULT);
5032 smulv_optab = init_optabv (MULT);
5033 smul_highpart_optab = init_optab (UNKNOWN);
5034 umul_highpart_optab = init_optab (UNKNOWN);
5035 smul_widen_optab = init_optab (UNKNOWN);
5036 umul_widen_optab = init_optab (UNKNOWN);
5037 usmul_widen_optab = init_optab (UNKNOWN);
5038 sdiv_optab = init_optab (DIV);
5039 sdivv_optab = init_optabv (DIV);
5040 sdivmod_optab = init_optab (UNKNOWN);
5041 udiv_optab = init_optab (UDIV);
5042 udivmod_optab = init_optab (UNKNOWN);
5043 smod_optab = init_optab (MOD);
5044 umod_optab = init_optab (UMOD);
5045 fmod_optab = init_optab (UNKNOWN);
5046 drem_optab = init_optab (UNKNOWN);
5047 ftrunc_optab = init_optab (UNKNOWN);
5048 and_optab = init_optab (AND);
5049 ior_optab = init_optab (IOR);
5050 xor_optab = init_optab (XOR);
5051 ashl_optab = init_optab (ASHIFT);
5052 ashr_optab = init_optab (ASHIFTRT);
5053 lshr_optab = init_optab (LSHIFTRT);
5054 rotl_optab = init_optab (ROTATE);
5055 rotr_optab = init_optab (ROTATERT);
5056 smin_optab = init_optab (SMIN);
5057 smax_optab = init_optab (SMAX);
5058 umin_optab = init_optab (UMIN);
5059 umax_optab = init_optab (UMAX);
5060 pow_optab = init_optab (UNKNOWN);
5061 atan2_optab = init_optab (UNKNOWN);
5063 /* These three have codes assigned exclusively for the sake of
5064 have_insn_for. */
5065 mov_optab = init_optab (SET);
5066 movstrict_optab = init_optab (STRICT_LOW_PART);
5067 cmp_optab = init_optab (COMPARE);
5069 ucmp_optab = init_optab (UNKNOWN);
5070 tst_optab = init_optab (UNKNOWN);
5072 eq_optab = init_optab (EQ);
5073 ne_optab = init_optab (NE);
5074 gt_optab = init_optab (GT);
5075 ge_optab = init_optab (GE);
5076 lt_optab = init_optab (LT);
5077 le_optab = init_optab (LE);
5078 unord_optab = init_optab (UNORDERED);
5080 neg_optab = init_optab (NEG);
5081 negv_optab = init_optabv (NEG);
5082 abs_optab = init_optab (ABS);
5083 absv_optab = init_optabv (ABS);
5084 addcc_optab = init_optab (UNKNOWN);
5085 one_cmpl_optab = init_optab (NOT);
5086 ffs_optab = init_optab (FFS);
5087 clz_optab = init_optab (CLZ);
5088 ctz_optab = init_optab (CTZ);
5089 popcount_optab = init_optab (POPCOUNT);
5090 parity_optab = init_optab (PARITY);
5091 sqrt_optab = init_optab (SQRT);
5092 floor_optab = init_optab (UNKNOWN);
5093 lfloor_optab = init_optab (UNKNOWN);
5094 ceil_optab = init_optab (UNKNOWN);
5095 lceil_optab = init_optab (UNKNOWN);
5096 round_optab = init_optab (UNKNOWN);
5097 btrunc_optab = init_optab (UNKNOWN);
5098 nearbyint_optab = init_optab (UNKNOWN);
5099 rint_optab = init_optab (UNKNOWN);
5100 lrint_optab = init_optab (UNKNOWN);
5101 sincos_optab = init_optab (UNKNOWN);
5102 sin_optab = init_optab (UNKNOWN);
5103 asin_optab = init_optab (UNKNOWN);
5104 cos_optab = init_optab (UNKNOWN);
5105 acos_optab = init_optab (UNKNOWN);
5106 exp_optab = init_optab (UNKNOWN);
5107 exp10_optab = init_optab (UNKNOWN);
5108 exp2_optab = init_optab (UNKNOWN);
5109 expm1_optab = init_optab (UNKNOWN);
5110 ldexp_optab = init_optab (UNKNOWN);
5111 logb_optab = init_optab (UNKNOWN);
5112 ilogb_optab = init_optab (UNKNOWN);
5113 log_optab = init_optab (UNKNOWN);
5114 log10_optab = init_optab (UNKNOWN);
5115 log2_optab = init_optab (UNKNOWN);
5116 log1p_optab = init_optab (UNKNOWN);
5117 tan_optab = init_optab (UNKNOWN);
5118 atan_optab = init_optab (UNKNOWN);
5119 copysign_optab = init_optab (UNKNOWN);
5121 strlen_optab = init_optab (UNKNOWN);
5122 cbranch_optab = init_optab (UNKNOWN);
5123 cmov_optab = init_optab (UNKNOWN);
5124 cstore_optab = init_optab (UNKNOWN);
5125 push_optab = init_optab (UNKNOWN);
5127 reduc_smax_optab = init_optab (UNKNOWN);
5128 reduc_umax_optab = init_optab (UNKNOWN);
5129 reduc_smin_optab = init_optab (UNKNOWN);
5130 reduc_umin_optab = init_optab (UNKNOWN);
5131 reduc_splus_optab = init_optab (UNKNOWN);
5132 reduc_uplus_optab = init_optab (UNKNOWN);
5134 vec_extract_optab = init_optab (UNKNOWN);
5135 vec_set_optab = init_optab (UNKNOWN);
5136 vec_init_optab = init_optab (UNKNOWN);
5137 vec_shl_optab = init_optab (UNKNOWN);
5138 vec_shr_optab = init_optab (UNKNOWN);
5139 vec_realign_load_optab = init_optab (UNKNOWN);
5140 movmisalign_optab = init_optab (UNKNOWN);
5142 powi_optab = init_optab (UNKNOWN);
5144 /* Conversions. */
5145 sext_optab = init_convert_optab (SIGN_EXTEND);
5146 zext_optab = init_convert_optab (ZERO_EXTEND);
5147 trunc_optab = init_convert_optab (TRUNCATE);
5148 sfix_optab = init_convert_optab (FIX);
5149 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5150 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5151 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5152 sfloat_optab = init_convert_optab (FLOAT);
5153 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5155 for (i = 0; i < NUM_MACHINE_MODES; i++)
5157 movmem_optab[i] = CODE_FOR_nothing;
5158 cmpstr_optab[i] = CODE_FOR_nothing;
5159 cmpstrn_optab[i] = CODE_FOR_nothing;
5160 cmpmem_optab[i] = CODE_FOR_nothing;
5161 setmem_optab[i] = CODE_FOR_nothing;
5163 sync_add_optab[i] = CODE_FOR_nothing;
5164 sync_sub_optab[i] = CODE_FOR_nothing;
5165 sync_ior_optab[i] = CODE_FOR_nothing;
5166 sync_and_optab[i] = CODE_FOR_nothing;
5167 sync_xor_optab[i] = CODE_FOR_nothing;
5168 sync_nand_optab[i] = CODE_FOR_nothing;
5169 sync_old_add_optab[i] = CODE_FOR_nothing;
5170 sync_old_sub_optab[i] = CODE_FOR_nothing;
5171 sync_old_ior_optab[i] = CODE_FOR_nothing;
5172 sync_old_and_optab[i] = CODE_FOR_nothing;
5173 sync_old_xor_optab[i] = CODE_FOR_nothing;
5174 sync_old_nand_optab[i] = CODE_FOR_nothing;
5175 sync_new_add_optab[i] = CODE_FOR_nothing;
5176 sync_new_sub_optab[i] = CODE_FOR_nothing;
5177 sync_new_ior_optab[i] = CODE_FOR_nothing;
5178 sync_new_and_optab[i] = CODE_FOR_nothing;
5179 sync_new_xor_optab[i] = CODE_FOR_nothing;
5180 sync_new_nand_optab[i] = CODE_FOR_nothing;
5181 sync_compare_and_swap[i] = CODE_FOR_nothing;
5182 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5183 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5184 sync_lock_release[i] = CODE_FOR_nothing;
5186 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5189 /* Fill in the optabs with the insns we support. */
5190 init_all_optabs ();
5192 /* Initialize the optabs with the names of the library functions. */
5193 init_integral_libfuncs (add_optab, "add", '3');
5194 init_floating_libfuncs (add_optab, "add", '3');
5195 init_integral_libfuncs (addv_optab, "addv", '3');
5196 init_floating_libfuncs (addv_optab, "add", '3');
5197 init_integral_libfuncs (sub_optab, "sub", '3');
5198 init_floating_libfuncs (sub_optab, "sub", '3');
5199 init_integral_libfuncs (subv_optab, "subv", '3');
5200 init_floating_libfuncs (subv_optab, "sub", '3');
5201 init_integral_libfuncs (smul_optab, "mul", '3');
5202 init_floating_libfuncs (smul_optab, "mul", '3');
5203 init_integral_libfuncs (smulv_optab, "mulv", '3');
5204 init_floating_libfuncs (smulv_optab, "mul", '3');
5205 init_integral_libfuncs (sdiv_optab, "div", '3');
5206 init_floating_libfuncs (sdiv_optab, "div", '3');
5207 init_integral_libfuncs (sdivv_optab, "divv", '3');
5208 init_integral_libfuncs (udiv_optab, "udiv", '3');
5209 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5210 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5211 init_integral_libfuncs (smod_optab, "mod", '3');
5212 init_integral_libfuncs (umod_optab, "umod", '3');
5213 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5214 init_integral_libfuncs (and_optab, "and", '3');
5215 init_integral_libfuncs (ior_optab, "ior", '3');
5216 init_integral_libfuncs (xor_optab, "xor", '3');
5217 init_integral_libfuncs (ashl_optab, "ashl", '3');
5218 init_integral_libfuncs (ashr_optab, "ashr", '3');
5219 init_integral_libfuncs (lshr_optab, "lshr", '3');
5220 init_integral_libfuncs (smin_optab, "min", '3');
5221 init_floating_libfuncs (smin_optab, "min", '3');
5222 init_integral_libfuncs (smax_optab, "max", '3');
5223 init_floating_libfuncs (smax_optab, "max", '3');
5224 init_integral_libfuncs (umin_optab, "umin", '3');
5225 init_integral_libfuncs (umax_optab, "umax", '3');
5226 init_integral_libfuncs (neg_optab, "neg", '2');
5227 init_floating_libfuncs (neg_optab, "neg", '2');
5228 init_integral_libfuncs (negv_optab, "negv", '2');
5229 init_floating_libfuncs (negv_optab, "neg", '2');
5230 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5231 init_integral_libfuncs (ffs_optab, "ffs", '2');
5232 init_integral_libfuncs (clz_optab, "clz", '2');
5233 init_integral_libfuncs (ctz_optab, "ctz", '2');
5234 init_integral_libfuncs (popcount_optab, "popcount", '2');
5235 init_integral_libfuncs (parity_optab, "parity", '2');
5237 /* Comparison libcalls for integers MUST come in pairs,
5238 signed/unsigned. */
5239 init_integral_libfuncs (cmp_optab, "cmp", '2');
5240 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5241 init_floating_libfuncs (cmp_optab, "cmp", '2');
5243 /* EQ etc are floating point only. */
5244 init_floating_libfuncs (eq_optab, "eq", '2');
5245 init_floating_libfuncs (ne_optab, "ne", '2');
5246 init_floating_libfuncs (gt_optab, "gt", '2');
5247 init_floating_libfuncs (ge_optab, "ge", '2');
5248 init_floating_libfuncs (lt_optab, "lt", '2');
5249 init_floating_libfuncs (le_optab, "le", '2');
5250 init_floating_libfuncs (unord_optab, "unord", '2');
5252 init_floating_libfuncs (powi_optab, "powi", '2');
5254 /* Conversions. */
5255 init_interclass_conv_libfuncs (sfloat_optab, "float",
5256 MODE_INT, MODE_FLOAT);
5257 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5258 MODE_INT, MODE_FLOAT);
5259 init_interclass_conv_libfuncs (sfix_optab, "fix",
5260 MODE_FLOAT, MODE_INT);
5261 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5262 MODE_FLOAT, MODE_INT);
5264 /* sext_optab is also used for FLOAT_EXTEND. */
5265 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5266 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5268 /* Use cabs for double complex abs, since systems generally have cabs.
5269 Don't define any libcall for float complex, so that cabs will be used. */
5270 if (complex_double_type_node)
5271 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5272 = init_one_libfunc ("cabs");
5274 /* The ffs function operates on `int'. */
5275 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5276 = init_one_libfunc ("ffs");
5278 abort_libfunc = init_one_libfunc ("abort");
5279 memcpy_libfunc = init_one_libfunc ("memcpy");
5280 memmove_libfunc = init_one_libfunc ("memmove");
5281 memcmp_libfunc = init_one_libfunc ("memcmp");
5282 memset_libfunc = init_one_libfunc ("memset");
5283 setbits_libfunc = init_one_libfunc ("__setbits");
5285 #ifndef DONT_USE_BUILTIN_SETJMP
5286 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5287 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5288 #else
5289 setjmp_libfunc = init_one_libfunc ("setjmp");
5290 longjmp_libfunc = init_one_libfunc ("longjmp");
5291 #endif
5292 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5293 unwind_sjlj_unregister_libfunc
5294 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5296 /* For function entry/exit instrumentation. */
5297 profile_function_entry_libfunc
5298 = init_one_libfunc ("__cyg_profile_func_enter");
5299 profile_function_exit_libfunc
5300 = init_one_libfunc ("__cyg_profile_func_exit");
5302 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5304 if (HAVE_conditional_trap)
5305 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5307 /* Allow the target to add more libcalls or rename some, etc. */
5308 targetm.init_libfuncs ();
5311 #ifdef DEBUG
5313 /* Print information about the current contents of the optabs on
5314 STDERR. */
5316 static void
5317 debug_optab_libfuncs (void)
5319 int i;
5320 int j;
5321 int k;
5323 /* Dump the arithmetic optabs. */
5324 for (i = 0; i != (int) OTI_MAX; i++)
5325 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5327 optab o;
5328 struct optab_handlers *h;
5330 o = optab_table[i];
5331 h = &o->handlers[j];
5332 if (h->libfunc)
5334         gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5335 fprintf (stderr, "%s\t%s:\t%s\n",
5336 GET_RTX_NAME (o->code),
5337 GET_MODE_NAME (j),
5338 XSTR (h->libfunc, 0));
5342 /* Dump the conversion optabs. */
5343 for (i = 0; i < (int) COI_MAX; ++i)
5344 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5345 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5347 convert_optab o;
5348 struct optab_handlers *h;
5350 o = &convert_optab_table[i];
5351 h = &o->handlers[j][k];
5352 if (h->libfunc)
5354           gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5355 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5356 GET_RTX_NAME (o->code),
5357 GET_MODE_NAME (j),
5358 GET_MODE_NAME (k),
5359 XSTR (h->libfunc, 0));
5364 #endif /* DEBUG */
5367 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5368 CODE. Return 0 on failure. */
5371 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5372 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5374 enum machine_mode mode = GET_MODE (op1);
5375 enum insn_code icode;
5376 rtx insn;
5378 if (!HAVE_conditional_trap)
5379 return 0;
5381 if (mode == VOIDmode)
5382 return 0;
5384 icode = cmp_optab->handlers[(int) mode].insn_code;
5385 if (icode == CODE_FOR_nothing)
5386 return 0;
5388 start_sequence ();
5389 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5390 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5391 if (!op1 || !op2)
5393 end_sequence ();
5394 return 0;
5396 emit_insn (GEN_FCN (icode) (op1, op2));
5398 PUT_CODE (trap_rtx, code);
5399 gcc_assert (HAVE_conditional_trap);
5400 insn = gen_conditional_trap (trap_rtx, tcode);
5401 if (insn)
5403 emit_insn (insn);
5404 insn = get_insns ();
5406 end_sequence ();
5408 return insn;
5411 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5412 or unsigned operation code. */
5414 static enum rtx_code
5415 get_rtx_code (enum tree_code tcode, bool unsignedp)
5417 enum rtx_code code;
5418 switch (tcode)
5420 case EQ_EXPR:
5421 code = EQ;
5422 break;
5423 case NE_EXPR:
5424 code = NE;
5425 break;
5426 case LT_EXPR:
5427 code = unsignedp ? LTU : LT;
5428 break;
5429 case LE_EXPR:
5430 code = unsignedp ? LEU : LE;
5431 break;
5432 case GT_EXPR:
5433 code = unsignedp ? GTU : GT;
5434 break;
5435 case GE_EXPR:
5436 code = unsignedp ? GEU : GE;
5437 break;
5439 case UNORDERED_EXPR:
5440 code = UNORDERED;
5441 break;
5442 case ORDERED_EXPR:
5443 code = ORDERED;
5444 break;
5445 case UNLT_EXPR:
5446 code = UNLT;
5447 break;
5448 case UNLE_EXPR:
5449 code = UNLE;
5450 break;
5451 case UNGT_EXPR:
5452 code = UNGT;
5453 break;
5454 case UNGE_EXPR:
5455 code = UNGE;
5456 break;
5457 case UNEQ_EXPR:
5458 code = UNEQ;
5459 break;
5460 case LTGT_EXPR:
5461 code = LTGT;
5462 break;
5464 default:
5465 gcc_unreachable ();
5467 return code;
5470 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5471 unsigned operators. Do not generate compare instruction. */
5473 static rtx
5474 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5476 enum rtx_code rcode;
5477 tree t_op0, t_op1;
5478 rtx rtx_op0, rtx_op1;
5480   /* This is unlikely.  While generating VEC_COND_EXPR, the auto-vectorizer
5481      ensures that the condition is a relational operation.  */
5482 gcc_assert (COMPARISON_CLASS_P (cond));
5484 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5485 t_op0 = TREE_OPERAND (cond, 0);
5486 t_op1 = TREE_OPERAND (cond, 1);
5488 /* Expand operands. */
5489 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5490 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
5492 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5493 && GET_MODE (rtx_op0) != VOIDmode)
5494 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5496 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5497 && GET_MODE (rtx_op1) != VOIDmode)
5498 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5500 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5503 /* Return insn code for VEC_COND_EXPR EXPR. */
5505 static inline enum insn_code
5506 get_vcond_icode (tree expr, enum machine_mode mode)
5508 enum insn_code icode = CODE_FOR_nothing;
5510 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5511 icode = vcondu_gen_code[mode];
5512 else
5513 icode = vcond_gen_code[mode];
5514 return icode;
5517 /* Return TRUE iff appropriate vector insns are available
5518 for the vector cond expr EXPR in mode VMODE. */
5520 bool
5521 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5523 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5524 return false;
5525 return true;
5528 /* Generate insns for VEC_COND_EXPR. */
5530 rtx
5531 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5533 enum insn_code icode;
5534 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5535 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5536 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5538 icode = get_vcond_icode (vec_cond_expr, mode);
5539 if (icode == CODE_FOR_nothing)
5540 return 0;
5542 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5543 target = gen_reg_rtx (mode);
5545 /* Get comparison rtx. First expand both cond expr operands. */
5546 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5547 unsignedp, icode);
5548 cc_op0 = XEXP (comparison, 0);
5549 cc_op1 = XEXP (comparison, 1);
5550 /* Expand both operands and force them into registers, if required. */
5551 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5552 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5553 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5554 && mode != VOIDmode)
5555 rtx_op1 = force_reg (mode, rtx_op1);
5557 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5558 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5559 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5560 && mode != VOIDmode)
5561 rtx_op2 = force_reg (mode, rtx_op2);
5563 /* Emit instruction! */
5564 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5565 comparison, cc_op0, cc_op1));
5567 return target;
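/* Sketch of how a caller might combine the availability check with the
   expander above; the helper name is hypothetical and VEC_COND is assumed
   to be a vector-typed VEC_COND_EXPR tree.  */
#if 0
static rtx
sketch_expand_vcond_or_fail (tree vec_cond, rtx target)
{
  enum machine_mode vmode = TYPE_MODE (TREE_TYPE (vec_cond));
  /* Bail out early when no vcond/vcondu pattern exists for VMODE;
     expand_vec_cond_expr itself would also return 0 in that case.  */
  if (!expand_vec_cond_expr_p (vec_cond, vmode))
    return NULL_RTX;
  return expand_vec_cond_expr (vec_cond, target);
}
#endif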
5571 /* This is an internal subroutine of the other compare_and_swap expanders.
5572 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5573 operation. TARGET is an optional place to store the value result of
5574 the operation. ICODE is the particular instruction to expand. Return
5575 the result of the operation. */
5577 static rtx
5578 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5579 rtx target, enum insn_code icode)
5581 enum machine_mode mode = GET_MODE (mem);
5582 rtx insn;
5584 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5585 target = gen_reg_rtx (mode);
5587 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5588 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5589 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5590 old_val = force_reg (mode, old_val);
5592 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5593 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5594 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5595 new_val = force_reg (mode, new_val);
5597 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5598 if (insn == NULL_RTX)
5599 return NULL_RTX;
5600 emit_insn (insn);
5602 return target;
5605 /* Expand a compare-and-swap operation and return its value. */
5607 rtx
5608 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5610 enum machine_mode mode = GET_MODE (mem);
5611 enum insn_code icode = sync_compare_and_swap[mode];
5613 if (icode == CODE_FOR_nothing)
5614 return NULL_RTX;
5616 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
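/* Usage sketch (helper name assumed): the value form of compare-and-swap
   as a builtin expander would call it, with no preferred TARGET.  */
#if 0
static rtx
sketch_expand_val_cas (rtx mem, rtx expected, rtx desired)
{
  /* Returns the value previously held by MEM, or NULL_RTX when the
     target provides no sync_compare_and_swap pattern for MEM's mode,
     in which case the caller must fall back to a library call.  */
  return expand_val_compare_and_swap (mem, expected, desired, NULL_RTX);
}
#endif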
5619 /* Expand a compare-and-swap operation and store true into the result if
5620 the operation was successful and false otherwise. Return the result.
5621 Unlike other routines, TARGET is not optional. */
5623 rtx
5624 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5626 enum machine_mode mode = GET_MODE (mem);
5627 enum insn_code icode;
5628 rtx subtarget, label0, label1;
5630 /* If the target supports a compare-and-swap pattern that simultaneously
5631 sets some flag for success, then use it. Otherwise use the regular
5632 compare-and-swap and follow that immediately with a compare insn. */
5633 icode = sync_compare_and_swap_cc[mode];
5634 switch (icode)
5636 default:
5637 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5638 NULL_RTX, icode);
5639 if (subtarget != NULL_RTX)
5640 break;
5642 /* FALLTHRU */
5643 case CODE_FOR_nothing:
5644 icode = sync_compare_and_swap[mode];
5645 if (icode == CODE_FOR_nothing)
5646 return NULL_RTX;
5648 /* Ensure that if old_val == mem, we're not comparing
5649 against an old value. */
5650 if (MEM_P (old_val))
5651 old_val = force_reg (mode, old_val);
5653 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5654 NULL_RTX, icode);
5655 if (subtarget == NULL_RTX)
5656 return NULL_RTX;
5658 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5661 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5662 setcc instruction from the beginning. We don't work too hard here,
5663 but it's nice to not be stupid about initial code gen either. */
5664 if (STORE_FLAG_VALUE == 1)
5666 icode = setcc_gen_code[EQ];
5667 if (icode != CODE_FOR_nothing)
5669 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5670 rtx insn;
5672 subtarget = target;
5673 if (!insn_data[icode].operand[0].predicate (target, cmode))
5674 subtarget = gen_reg_rtx (cmode);
5676 insn = GEN_FCN (icode) (subtarget);
5677 if (insn)
5679 emit_insn (insn);
5680 if (GET_MODE (target) != GET_MODE (subtarget))
5682 convert_move (target, subtarget, 1);
5683 subtarget = target;
5685 return subtarget;
5690 /* Without an appropriate setcc instruction, use a set of branches to
5691 get 1 and 0 stored into target. Presumably if the target has a
5692 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5694 label0 = gen_label_rtx ();
5695 label1 = gen_label_rtx ();
5697 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
5698 emit_move_insn (target, const0_rtx);
5699 emit_jump_insn (gen_jump (label1));
5700 emit_barrier ();
5701 emit_label (label0);
5702 emit_move_insn (target, const1_rtx);
5703 emit_label (label1);
5705 return target;
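/* Usage sketch for the boolean form (helper name assumed; SImode is an
   arbitrary choice for the 0/1 result and is not mandated by the
   interface).  Note that TARGET is required here, unlike the value form
   above.  */
#if 0
static rtx
sketch_expand_bool_cas (rtx mem, rtx expected, rtx desired)
{
  rtx target = gen_reg_rtx (SImode);
  /* TARGET receives 1 when the swap succeeded and 0 otherwise;
     NULL_RTX is returned if no compare-and-swap pattern exists.  */
  return expand_bool_compare_and_swap (mem, expected, desired, target);
}
#endif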
5708 /* This is a helper function for the other atomic operations. This function
5709 emits a loop that contains SEQ that iterates until a compare-and-swap
5710 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5711 a set of instructions that takes a value from OLD_REG as an input and
5712 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5713 set to the current contents of MEM. After SEQ, a compare-and-swap will
5714 attempt to update MEM with NEW_REG. The function returns true when the
5715 loop was generated successfully. */
5717 static bool
5718 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5720 enum machine_mode mode = GET_MODE (mem);
5721 enum insn_code icode;
5722 rtx label, cmp_reg, subtarget;
5724 /* The loop we want to generate looks like
5726 cmp_reg = mem;
5727 label:
5728 old_reg = cmp_reg;
5729 seq;
5730 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
5731 if (cmp_reg != old_reg)
5732 goto label;
5734 Note that we only do the plain load from memory once. Subsequent
5735 iterations use the value loaded by the compare-and-swap pattern. */
5737 label = gen_label_rtx ();
5738 cmp_reg = gen_reg_rtx (mode);
5740 emit_move_insn (cmp_reg, mem);
5741 emit_label (label);
5742 emit_move_insn (old_reg, cmp_reg);
5743 if (seq)
5744 emit_insn (seq);
5746 /* If the target supports a compare-and-swap pattern that simultaneously
5747 sets some flag for success, then use it. Otherwise use the regular
5748 compare-and-swap and follow that immediately with a compare insn. */
5749 icode = sync_compare_and_swap_cc[mode];
5750 switch (icode)
5752 default:
5753 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5754 cmp_reg, icode);
5755 if (subtarget != NULL_RTX)
5757 gcc_assert (subtarget == cmp_reg);
5758 break;
5761 /* FALLTHRU */
5762 case CODE_FOR_nothing:
5763 icode = sync_compare_and_swap[mode];
5764 if (icode == CODE_FOR_nothing)
5765 return false;
5767 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5768 cmp_reg, icode);
5769 if (subtarget == NULL_RTX)
5770 return false;
5771 if (subtarget != cmp_reg)
5772 emit_move_insn (cmp_reg, subtarget);
5774 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
5777 /* ??? Mark this jump predicted not taken? */
5778 emit_jump_insn (bcc_gen_fctn[NE] (label));
5780 return true;
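/* Sketch of a typical caller (the helper name is invented; the shape
   mirrors the sync expanders below): an atomic increment built from a
   compare-and-swap loop.  */
#if 0
static bool
sketch_atomic_increment (rtx mem)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx old_reg = gen_reg_rtx (mode);
  rtx new_reg, seq;

  /* SEQ computes NEW_REG from OLD_REG; here NEW_REG = OLD_REG + 1.  */
  start_sequence ();
  new_reg = expand_simple_binop (mode, PLUS, old_reg, const1_rtx,
                                 NULL_RTX, true, OPTAB_LIB_WIDEN);
  seq = get_insns ();
  end_sequence ();

  return new_reg != NULL_RTX
         && expand_compare_and_swap_loop (mem, old_reg, new_reg, seq);
}
#endif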
5783 /* This function generates the atomic operation MEM CODE= VAL. In this
5784 case, we do not care about any resulting value. Returns NULL if we
5785 cannot generate the operation. */
5787 rtx
5788 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
5790 enum machine_mode mode = GET_MODE (mem);
5791 enum insn_code icode;
5792 rtx insn;
5794 /* Look to see if the target supports the operation directly. */
5795 switch (code)
5797 case PLUS:
5798 icode = sync_add_optab[mode];
5799 break;
5800 case IOR:
5801 icode = sync_ior_optab[mode];
5802 break;
5803 case XOR:
5804 icode = sync_xor_optab[mode];
5805 break;
5806 case AND:
5807 icode = sync_and_optab[mode];
5808 break;
5809 case NOT:
5810 icode = sync_nand_optab[mode];
5811 break;
5813 case MINUS:
5814 icode = sync_sub_optab[mode];
5815 if (icode == CODE_FOR_nothing)
5817 icode = sync_add_optab[mode];
5818 if (icode != CODE_FOR_nothing)
5820 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5821 code = PLUS;
5824 break;
5826 default:
5827 gcc_unreachable ();
5830 /* Generate the direct operation, if present. */
5831 if (icode != CODE_FOR_nothing)
5833 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5834 val = convert_modes (mode, GET_MODE (val), val, 1);
5835 if (!insn_data[icode].operand[1].predicate (val, mode))
5836 val = force_reg (mode, val);
5838 insn = GEN_FCN (icode) (mem, val);
5839 if (insn)
5841 emit_insn (insn);
5842 return const0_rtx;
5846 /* Failing that, generate a compare-and-swap loop in which we perform the
5847 operation with normal arithmetic instructions. */
5848 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5850 rtx t0 = gen_reg_rtx (mode), t1;
5852 start_sequence ();
5854 t1 = t0;
5855 if (code == NOT)
5857 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
5858 code = AND;
5860 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
5861 true, OPTAB_LIB_WIDEN);
5863 insn = get_insns ();
5864 end_sequence ();
5866 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
5867 return const0_rtx;
5870 return NULL_RTX;
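/* Usage sketch (helper name assumed): expanding __sync_fetch_and_add
   when the fetched value is unused, which is exactly the case this
   no-result expander is meant for.  */
#if 0
static bool
sketch_sync_add_no_result (rtx mem, rtx val)
{
  /* const0_rtx signals success; NULL_RTX means neither a direct sync
     pattern nor a compare-and-swap loop could be generated, and the
     caller must emit a library call instead.  */
  return expand_sync_operation (mem, val, PLUS) != NULL_RTX;
}
#endif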
5873 /* This function generates the atomic operation MEM CODE= VAL. In this
5874 case, we do care about the resulting value: if AFTER is true then
5875 return the value MEM holds after the operation; if AFTER is false
5876 then return the value MEM holds before the operation. TARGET is an
5877 optional place for the result value to be stored. */
5879 rtx
5880 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
5881 bool after, rtx target)
5883 enum machine_mode mode = GET_MODE (mem);
5884 enum insn_code old_code, new_code, icode;
5885 bool compensate;
5886 rtx insn;
5888 /* Look to see if the target supports the operation directly. */
5889 switch (code)
5891 case PLUS:
5892 old_code = sync_old_add_optab[mode];
5893 new_code = sync_new_add_optab[mode];
5894 break;
5895 case IOR:
5896 old_code = sync_old_ior_optab[mode];
5897 new_code = sync_new_ior_optab[mode];
5898 break;
5899 case XOR:
5900 old_code = sync_old_xor_optab[mode];
5901 new_code = sync_new_xor_optab[mode];
5902 break;
5903 case AND:
5904 old_code = sync_old_and_optab[mode];
5905 new_code = sync_new_and_optab[mode];
5906 break;
5907 case NOT:
5908 old_code = sync_old_nand_optab[mode];
5909 new_code = sync_new_nand_optab[mode];
5910 break;
5912 case MINUS:
5913 old_code = sync_old_sub_optab[mode];
5914 new_code = sync_new_sub_optab[mode];
5915 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
5917 old_code = sync_old_add_optab[mode];
5918 new_code = sync_new_add_optab[mode];
5919 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
5921 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5922 code = PLUS;
5925 break;
5927 default:
5928 gcc_unreachable ();
5931 /* If the target supports the proper new/old operation, great. But
5932 if we only support the opposite old/new operation, check to see if we
5933 can compensate. When the old value is supported, we can always
5934 perform the operation again with normal arithmetic. When only the
5935 new value is supported, we can compensate only when the operation
5936 is reversible. */
5937 compensate = false;
5938 if (after)
5940 icode = new_code;
5941 if (icode == CODE_FOR_nothing)
5943 icode = old_code;
5944 if (icode != CODE_FOR_nothing)
5945 compensate = true;
5948 else
5950 icode = old_code;
5951 if (icode == CODE_FOR_nothing
5952 && (code == PLUS || code == MINUS || code == XOR))
5954 icode = new_code;
5955 if (icode != CODE_FOR_nothing)
5956 compensate = true;
5960 /* If we found something supported, great. */
5961 if (icode != CODE_FOR_nothing)
5963 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5964 target = gen_reg_rtx (mode);
5966 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5967 val = convert_modes (mode, GET_MODE (val), val, 1);
5968 if (!insn_data[icode].operand[2].predicate (val, mode))
5969 val = force_reg (mode, val);
5971 insn = GEN_FCN (icode) (target, mem, val);
5972 if (insn)
5974 emit_insn (insn);
5976 /* If we need to compensate for using an operation with the
5977 wrong return value, do so now. */
5978 if (compensate)
5980 if (!after)
5982 if (code == PLUS)
5983 code = MINUS;
5984 else if (code == MINUS)
5985 code = PLUS;
5988 if (code == NOT)
5989 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
5990 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
5991 true, OPTAB_LIB_WIDEN);
5994 return target;
5998 /* Failing that, generate a compare-and-swap loop in which we perform the
5999 operation with normal arithmetic instructions. */
6000 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6002 rtx t0 = gen_reg_rtx (mode), t1;
6004 if (!target || !register_operand (target, mode))
6005 target = gen_reg_rtx (mode);
6007 start_sequence ();
6009 if (!after)
6010 emit_move_insn (target, t0);
6011 t1 = t0;
6012 if (code == NOT)
6014 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6015 code = AND;
6017 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6018 true, OPTAB_LIB_WIDEN);
6019 if (after)
6020 emit_move_insn (target, t1);
6022 insn = get_insns ();
6023 end_sequence ();
6025 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6026 return target;
6029 return NULL_RTX;
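/* Usage sketch (helper name assumed): the AFTER flag selects between the
   two builtin flavours.  AFTER == false yields the pre-operation value
   (__sync_fetch_and_add); AFTER == true would yield the post-operation
   value (__sync_add_and_fetch).  */
#if 0
static rtx
sketch_fetch_and_add (rtx mem, rtx val, rtx target)
{
  return expand_sync_fetch_operation (mem, val, PLUS, false, target);
}
#endif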
6032 /* This function expands a test-and-set operation. Ideally we atomically
6033 store VAL in MEM and return the previous value in MEM. Some targets
6034 may only support this operation when VAL is the constant 1; in that
6035 case the return value will still be 0/1, but the exact value stored
6036 in MEM is target-defined. TARGET is an optional place to stick the
6037 return value. */
6039 rtx
6040 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6042 enum machine_mode mode = GET_MODE (mem);
6043 enum insn_code icode;
6044 rtx insn;
6046 /* If the target supports the test-and-set directly, great. */
6047 icode = sync_lock_test_and_set[mode];
6048 if (icode != CODE_FOR_nothing)
6050 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6051 target = gen_reg_rtx (mode);
6053 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6054 val = convert_modes (mode, GET_MODE (val), val, 1);
6055 if (!insn_data[icode].operand[2].predicate (val, mode))
6056 val = force_reg (mode, val);
6058 insn = GEN_FCN (icode) (target, mem, val);
6059 if (insn)
6061 emit_insn (insn);
6062 return target;
6066 /* Otherwise, use a compare-and-swap loop for the exchange. */
6067 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6069 if (!target || !register_operand (target, mode))
6070 target = gen_reg_rtx (mode);
6071 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6072 val = convert_modes (mode, GET_MODE (val), val, 1);
6073 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6074 return target;
6077 return NULL_RTX;
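/* Usage sketch (helper name assumed): a spinlock-style acquire that
   atomically stores 1 and inspects the previous contents.  VAL == 1 is
   deliberate, since some targets only guarantee that form.  */
#if 0
static rtx
sketch_lock_acquire (rtx mem)
{
  /* Returns the old value (or a target-defined 0/1 on restricted
     targets), or NULL_RTX when no pattern and no compare-and-swap
     fallback is available.  */
  return expand_sync_lock_test_and_set (mem, const1_rtx, NULL_RTX);
}
#endif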
6080 #include "gt-optabs.h"