gcc/optabs.c (official-gcc.git)
1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "toplev.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
32 #include "rtl.h"
33 #include "tree.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "function.h"
37 #include "except.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "libfuncs.h"
41 #include "recog.h"
42 #include "reload.h"
43 #include "ggc.h"
44 #include "real.h"
45 #include "basic-block.h"
46 #include "target.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
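/* Illustrative sketch (editorial addition, not part of the original
   source): a caller asking whether this target can add two SImode
   values directly would consult the handler table along the lines of

     if (add_optab->handlers[(int) SImode].insn_code != CODE_FOR_nothing)
       ... emit the named add pattern ...
     else if (add_optab->handlers[(int) SImode].libfunc)
       ... fall back to the library routine ...

   expand_binop below performs exactly this kind of lookup, together
   with the widening and library fallbacks.  */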
57 optab optab_table[OTI_MAX];
59 rtx libfunc_table[LTI_MAX];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[COI_MAX];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
67 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
85 #endif
87 /* Indexed by the machine mode, gives the insn code for vector conditional
88 operation. */
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93 /* The insn generating function cannot take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
100 int);
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
105 int *);
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static enum rtx_code get_rtx_code (enum tree_code, bool);
126 static rtx vector_compare_rtx (tree, bool, enum insn_code);
128 #ifndef HAVE_conditional_trap
129 #define HAVE_conditional_trap 0
130 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
131 #endif
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
135 operation).
137 If the last insn does not set TARGET, don't do anything, but return 1.
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
143 static int
144 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
146 rtx last_insn, insn, set;
147 rtx note;
149 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
151 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
152 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
153 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
154 && GET_RTX_CLASS (code) != RTX_COMPARE
155 && GET_RTX_CLASS (code) != RTX_UNARY)
156 return 1;
158 if (GET_CODE (target) == ZERO_EXTRACT)
159 return 1;
161 for (last_insn = insns;
162 NEXT_INSN (last_insn) != NULL_RTX;
163 last_insn = NEXT_INSN (last_insn))
166 set = single_set (last_insn);
167 if (set == NULL_RTX)
168 return 1;
170 if (! rtx_equal_p (SET_DEST (set), target)
171 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
172 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
173 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
174 return 1;
176 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
177 besides the last insn. */
178 if (reg_overlap_mentioned_p (target, op0)
179 || (op1 && reg_overlap_mentioned_p (target, op1)))
181 insn = PREV_INSN (last_insn);
182 while (insn != NULL_RTX)
184 if (reg_set_p (target, insn))
185 return 0;
187 insn = PREV_INSN (insn);
191 if (GET_RTX_CLASS (code) == RTX_UNARY)
192 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
193 else
194 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
196 set_unique_reg_note (last_insn, REG_EQUAL, note);
198 return 1;
201 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
202 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
203 not actually do a sign-extend or zero-extend, but can leave the
204 higher-order bits of the result rtx undefined, for example, in the case
205 of logical operations, but not right shifts. */
207 static rtx
208 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
209 int unsignedp, int no_extend)
211 rtx result;
213 /* If we don't have to extend and this is a constant, return it. */
214 if (no_extend && GET_MODE (op) == VOIDmode)
215 return op;
217 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
218 extend since it will be more efficient to do so unless the signedness of
219 a promoted object differs from our extension. */
220 if (! no_extend
221 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
222 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
223 return convert_modes (mode, oldmode, op, unsignedp);
225 /* If MODE is no wider than a single word, we return a paradoxical
226 SUBREG. */
227 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
228 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
230 /* Otherwise, get an object of MODE, clobber it, and set the low-order
231 part to OP. */
233 result = gen_reg_rtx (mode);
234 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
235 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
236 return result;
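/* Illustrative example (editorial addition): when widening a HImode
   operand to SImode for a bitwise AND, the caller can pass
   NO_EXTEND = 1 because only the low 16 bits of the result will be
   kept.  On a 32-bit target,

     widen_operand (op, SImode, HImode, unsignedp, 1);

   then returns a paradoxical SUBREG of OP (for a non-constant,
   non-promoted operand) instead of emitting an explicit zero- or
   sign-extension.  */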
239 /* Return the optab used for computing the operation given by
240 the tree code, CODE. This function is not always usable (for
241 example, it cannot give complete results for multiplication
242 or division) but probably ought to be relied on more widely
243 throughout the expander. */
244 optab
245 optab_for_tree_code (enum tree_code code, tree type)
247 bool trapv;
248 switch (code)
250 case BIT_AND_EXPR:
251 return and_optab;
253 case BIT_IOR_EXPR:
254 return ior_optab;
256 case BIT_NOT_EXPR:
257 return one_cmpl_optab;
259 case BIT_XOR_EXPR:
260 return xor_optab;
262 case TRUNC_MOD_EXPR:
263 case CEIL_MOD_EXPR:
264 case FLOOR_MOD_EXPR:
265 case ROUND_MOD_EXPR:
266 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
268 case RDIV_EXPR:
269 case TRUNC_DIV_EXPR:
270 case CEIL_DIV_EXPR:
271 case FLOOR_DIV_EXPR:
272 case ROUND_DIV_EXPR:
273 case EXACT_DIV_EXPR:
274 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
276 case LSHIFT_EXPR:
277 return ashl_optab;
279 case RSHIFT_EXPR:
280 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
282 case LROTATE_EXPR:
283 return rotl_optab;
285 case RROTATE_EXPR:
286 return rotr_optab;
288 case MAX_EXPR:
289 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
291 case MIN_EXPR:
292 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
294 case REALIGN_LOAD_EXPR:
295 return vec_realign_load_optab;
297 case REDUC_MAX_EXPR:
298 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
300 case REDUC_MIN_EXPR:
301 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
303 case REDUC_PLUS_EXPR:
304 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
306 case VEC_LSHIFT_EXPR:
307 return vec_shl_optab;
309 case VEC_RSHIFT_EXPR:
310 return vec_shr_optab;
312 default:
313 break;
316 trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
317 switch (code)
319 case PLUS_EXPR:
320 return trapv ? addv_optab : add_optab;
322 case MINUS_EXPR:
323 return trapv ? subv_optab : sub_optab;
325 case MULT_EXPR:
326 return trapv ? smulv_optab : smul_optab;
328 case NEGATE_EXPR:
329 return trapv ? negv_optab : neg_optab;
331 case ABS_EXPR:
332 return trapv ? absv_optab : abs_optab;
334 default:
335 return NULL;
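/* Illustrative examples (editorial addition): with the mapping above,
   optab_for_tree_code (BIT_AND_EXPR, type) is always and_optab;
   optab_for_tree_code (RSHIFT_EXPR, type) is lshr_optab for an
   unsigned TYPE and ashr_optab for a signed one; and
   optab_for_tree_code (PLUS_EXPR, type) yields addv_optab rather than
   add_optab when -ftrapv is in effect and TYPE is a signed integral
   type.  */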
340 /* Generate code to perform an operation specified by TERNARY_OPTAB
341 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
343 UNSIGNEDP is for the case where we have to widen the operands
344 to perform the operation. It says to use zero-extension.
346 If TARGET is nonzero, the value
347 is generated there, if it is convenient to do so.
348 In all cases an rtx is returned for the locus of the value;
349 this may or may not be TARGET. */
352 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
353 rtx op1, rtx op2, rtx target, int unsignedp)
355 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
356 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
357 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
358 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
359 rtx temp;
360 rtx pat;
361 rtx xop0 = op0, xop1 = op1, xop2 = op2;
363 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
364 != CODE_FOR_nothing);
366 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
367 temp = gen_reg_rtx (mode);
368 else
369 temp = target;
371 /* In case the insn wants input operands in modes different from
372 those of the actual operands, convert the operands. It would
373 seem that we don't need to convert CONST_INTs, but we do, so
374 that they're properly zero-extended, sign-extended or truncated
375 for their mode. */
377 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
378 xop0 = convert_modes (mode0,
379 GET_MODE (op0) != VOIDmode
380 ? GET_MODE (op0)
381 : mode,
382 xop0, unsignedp);
384 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
385 xop1 = convert_modes (mode1,
386 GET_MODE (op1) != VOIDmode
387 ? GET_MODE (op1)
388 : mode,
389 xop1, unsignedp);
391 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
392 xop2 = convert_modes (mode2,
393 GET_MODE (op2) != VOIDmode
394 ? GET_MODE (op2)
395 : mode,
396 xop2, unsignedp);
398 /* Now, if insn's predicates don't allow our operands, put them into
399 pseudo regs. */
401 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
402 && mode0 != VOIDmode)
403 xop0 = copy_to_mode_reg (mode0, xop0);
405 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
406 && mode1 != VOIDmode)
407 xop1 = copy_to_mode_reg (mode1, xop1);
409 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
410 && mode2 != VOIDmode)
411 xop2 = copy_to_mode_reg (mode2, xop2);
413 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
415 emit_insn (pat);
416 return temp;
420 /* Like expand_binop, but return a constant rtx if the result can be
421 calculated at compile time. The arguments and return value are
422 otherwise the same as for expand_binop. */
424 static rtx
425 simplify_expand_binop (enum machine_mode mode, optab binoptab,
426 rtx op0, rtx op1, rtx target, int unsignedp,
427 enum optab_methods methods)
429 if (CONSTANT_P (op0) && CONSTANT_P (op1))
430 return simplify_gen_binary (binoptab->code, mode, op0, op1);
431 else
432 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
435 /* Like simplify_expand_binop, but always put the result in TARGET.
436 Return true if the expansion succeeded. */
438 bool
439 force_expand_binop (enum machine_mode mode, optab binoptab,
440 rtx op0, rtx op1, rtx target, int unsignedp,
441 enum optab_methods methods)
443 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
444 target, unsignedp, methods);
445 if (x == 0)
446 return false;
447 if (x != target)
448 emit_move_insn (target, x);
449 return true;
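/* Illustrative usage (editorial addition): the doubleword shift code
   below leans on these wrappers.  For example,

     simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                            0, true, methods)

   folds to a constant when both TMP and OP1 are constants and
   otherwise expands a real subtraction, while force_expand_binop
   additionally guarantees that any successful result ends up in the
   requested target, returning false when METHODS cannot provide the
   operation.  */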
452 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
455 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
457 enum insn_code icode;
458 rtx rtx_op1, rtx_op2;
459 enum machine_mode mode1;
460 enum machine_mode mode2;
461 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
462 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
463 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
464 optab shift_optab;
465 rtx pat;
467 switch (TREE_CODE (vec_shift_expr))
469 case VEC_RSHIFT_EXPR:
470 shift_optab = vec_shr_optab;
471 break;
472 case VEC_LSHIFT_EXPR:
473 shift_optab = vec_shl_optab;
474 break;
475 default:
476 gcc_unreachable ();
479 icode = (int) shift_optab->handlers[(int) mode].insn_code;
480 gcc_assert (icode != CODE_FOR_nothing);
482 mode1 = insn_data[icode].operand[1].mode;
483 mode2 = insn_data[icode].operand[2].mode;
485 rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
486 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
487 && mode1 != VOIDmode)
488 rtx_op1 = force_reg (mode1, rtx_op1);
490 rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
491 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
492 && mode2 != VOIDmode)
493 rtx_op2 = force_reg (mode2, rtx_op2);
495 if (!target
496 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
497 target = gen_reg_rtx (mode);
499 /* Emit instruction */
500 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
501 gcc_assert (pat);
502 emit_insn (pat);
504 return target;
507 /* This subroutine of expand_doubleword_shift handles the cases in which
508 the effective shift value is >= BITS_PER_WORD. The arguments and return
509 value are the same as for the parent routine, except that SUPERWORD_OP1
510 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
511 INTO_TARGET may be null if the caller has decided to calculate it. */
513 static bool
514 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
515 rtx outof_target, rtx into_target,
516 int unsignedp, enum optab_methods methods)
518 if (into_target != 0)
519 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
520 into_target, unsignedp, methods))
521 return false;
523 if (outof_target != 0)
525 /* For a signed right shift, we must fill OUTOF_TARGET with copies
526 of the sign bit, otherwise we must fill it with zeros. */
527 if (binoptab != ashr_optab)
528 emit_move_insn (outof_target, CONST0_RTX (word_mode));
529 else
530 if (!force_expand_binop (word_mode, binoptab,
531 outof_input, GEN_INT (BITS_PER_WORD - 1),
532 outof_target, unsignedp, methods))
533 return false;
535 return true;
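/* Worked example (editorial addition): with 32-bit words and an
   effective shift count of, say, 40, SUPERWORD_OP1 amounts to a shift
   by 8 bit positions and the code above emits

     into_target  = outof_input <shift> 8
     outof_target = 0                      (logical shifts)
     outof_target = outof_input >> 31      (arithmetic right shift)

   which matches shifting the full 64-bit value by 40 bits.  */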
538 /* This subroutine of expand_doubleword_shift handles the cases in which
539 the effective shift value is < BITS_PER_WORD. The arguments and return
540 value are the same as for the parent routine. */
542 static bool
543 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
544 rtx outof_input, rtx into_input, rtx op1,
545 rtx outof_target, rtx into_target,
546 int unsignedp, enum optab_methods methods,
547 unsigned HOST_WIDE_INT shift_mask)
549 optab reverse_unsigned_shift, unsigned_shift;
550 rtx tmp, carries;
552 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
553 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
555 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
556 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
557 the opposite direction to BINOPTAB. */
558 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
560 carries = outof_input;
561 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
562 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
563 0, true, methods);
565 else
567 /* We must avoid shifting by BITS_PER_WORD bits since that is either
568 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
569 has unknown behavior. Do a single shift first, then shift by the
570 remainder. It's OK to use ~OP1 as the remainder if shift counts
571 are truncated to the mode size. */
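/* Illustrative check (editorial addition): with 32-bit words,
   shift_mask == 31 and a subword left shift by OP1, this computes

     carries = (outof_input >> 1) >> (~OP1 & 31)
             = outof_input >> (32 - OP1)

   which is exactly the high part of OUTOF_INPUT that must be ORed
   into INTO_TARGET below.  */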
572 carries = expand_binop (word_mode, reverse_unsigned_shift,
573 outof_input, const1_rtx, 0, unsignedp, methods);
574 if (shift_mask == BITS_PER_WORD - 1)
576 tmp = immed_double_const (-1, -1, op1_mode);
577 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
578 0, true, methods);
580 else
582 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
583 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
584 0, true, methods);
587 if (tmp == 0 || carries == 0)
588 return false;
589 carries = expand_binop (word_mode, reverse_unsigned_shift,
590 carries, tmp, 0, unsignedp, methods);
591 if (carries == 0)
592 return false;
594 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
595 so the result can go directly into INTO_TARGET if convenient. */
596 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
597 into_target, unsignedp, methods);
598 if (tmp == 0)
599 return false;
601 /* Now OR in the bits carried over from OUTOF_INPUT. */
602 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
603 into_target, unsignedp, methods))
604 return false;
606 /* Use a standard word_mode shift for the out-of half. */
607 if (outof_target != 0)
608 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
609 outof_target, unsignedp, methods))
610 return false;
612 return true;
616 #ifdef HAVE_conditional_move
617 /* Try implementing expand_doubleword_shift using conditional moves.
618 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
619 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
620 are the shift counts to use in the former and latter case. All other
621 arguments are the same as the parent routine. */
623 static bool
624 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
625 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
626 rtx outof_input, rtx into_input,
627 rtx subword_op1, rtx superword_op1,
628 rtx outof_target, rtx into_target,
629 int unsignedp, enum optab_methods methods,
630 unsigned HOST_WIDE_INT shift_mask)
632 rtx outof_superword, into_superword;
634 /* Put the superword version of the output into OUTOF_SUPERWORD and
635 INTO_SUPERWORD. */
636 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
637 if (outof_target != 0 && subword_op1 == superword_op1)
639 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
640 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
641 into_superword = outof_target;
642 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
643 outof_superword, 0, unsignedp, methods))
644 return false;
646 else
648 into_superword = gen_reg_rtx (word_mode);
649 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
650 outof_superword, into_superword,
651 unsignedp, methods))
652 return false;
655 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
656 if (!expand_subword_shift (op1_mode, binoptab,
657 outof_input, into_input, subword_op1,
658 outof_target, into_target,
659 unsignedp, methods, shift_mask))
660 return false;
662 /* Select between them. Do the INTO half first because INTO_SUPERWORD
663 might be the current value of OUTOF_TARGET. */
664 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
665 into_target, into_superword, word_mode, false))
666 return false;
668 if (outof_target != 0)
669 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
670 outof_target, outof_superword,
671 word_mode, false))
672 return false;
674 return true;
676 #endif
678 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
679 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
680 input operand; the shift moves bits in the direction OUTOF_INPUT->
681 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
682 of the target. OP1 is the shift count and OP1_MODE is its mode.
683 If OP1 is constant, it will have been truncated as appropriate
684 and is known to be nonzero.
686 If SHIFT_MASK is zero, the result of word shifts is undefined when the
687 shift count is outside the range [0, BITS_PER_WORD). This routine must
688 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
690 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
691 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
692 fill with zeros or sign bits as appropriate.
694 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
695 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
696 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
697 In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
698 are undefined.
700 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
701 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
702 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
703 function wants to calculate it itself.
705 Return true if the shift could be successfully synthesized. */
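/* Illustrative decomposition (editorial addition): for a 64-bit left
   shift on a 32-bit target with a non-constant count OP1, the code
   below effectively selects between

     OP1 < 32:   high = (high << OP1) | (low >> (32 - OP1))
                 low  = low << OP1
     OP1 >= 32:  high = low << (OP1 - 32)
                 low  = 0

   using a conditional move or a branch built from CMP_CODE, CMP1 and
   CMP2.  */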
707 static bool
708 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
709 rtx outof_input, rtx into_input, rtx op1,
710 rtx outof_target, rtx into_target,
711 int unsignedp, enum optab_methods methods,
712 unsigned HOST_WIDE_INT shift_mask)
714 rtx superword_op1, tmp, cmp1, cmp2;
715 rtx subword_label, done_label;
716 enum rtx_code cmp_code;
718 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
719 fill the result with sign or zero bits as appropriate. If so, the value
720 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
721 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
722 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
724 This isn't worthwhile for constant shifts since the optimizers will
725 cope better with in-range shift counts. */
726 if (shift_mask >= BITS_PER_WORD
727 && outof_target != 0
728 && !CONSTANT_P (op1))
730 if (!expand_doubleword_shift (op1_mode, binoptab,
731 outof_input, into_input, op1,
732 0, into_target,
733 unsignedp, methods, shift_mask))
734 return false;
735 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
736 outof_target, unsignedp, methods))
737 return false;
738 return true;
741 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
742 is true when the effective shift value is less than BITS_PER_WORD.
743 Set SUPERWORD_OP1 to the shift count that should be used to shift
744 OUTOF_INPUT into INTO_TARGET when the condition is false. */
745 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
746 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
748 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
749 is a subword shift count. */
750 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
751 0, true, methods);
752 cmp2 = CONST0_RTX (op1_mode);
753 cmp_code = EQ;
754 superword_op1 = op1;
756 else
758 /* Set CMP1 to OP1 - BITS_PER_WORD. */
759 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
760 0, true, methods);
761 cmp2 = CONST0_RTX (op1_mode);
762 cmp_code = LT;
763 superword_op1 = cmp1;
765 if (cmp1 == 0)
766 return false;
768 /* If we can compute the condition at compile time, pick the
769 appropriate subroutine. */
770 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
771 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
773 if (tmp == const0_rtx)
774 return expand_superword_shift (binoptab, outof_input, superword_op1,
775 outof_target, into_target,
776 unsignedp, methods);
777 else
778 return expand_subword_shift (op1_mode, binoptab,
779 outof_input, into_input, op1,
780 outof_target, into_target,
781 unsignedp, methods, shift_mask);
784 #ifdef HAVE_conditional_move
785 /* Try using conditional moves to generate straight-line code. */
787 rtx start = get_last_insn ();
788 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
789 cmp_code, cmp1, cmp2,
790 outof_input, into_input,
791 op1, superword_op1,
792 outof_target, into_target,
793 unsignedp, methods, shift_mask))
794 return true;
795 delete_insns_since (start);
797 #endif
799 /* As a last resort, use branches to select the correct alternative. */
800 subword_label = gen_label_rtx ();
801 done_label = gen_label_rtx ();
803 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
804 0, 0, subword_label);
806 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
807 outof_target, into_target,
808 unsignedp, methods))
809 return false;
811 emit_jump_insn (gen_jump (done_label));
812 emit_barrier ();
813 emit_label (subword_label);
815 if (!expand_subword_shift (op1_mode, binoptab,
816 outof_input, into_input, op1,
817 outof_target, into_target,
818 unsignedp, methods, shift_mask))
819 return false;
821 emit_label (done_label);
822 return true;
825 /* Subroutine of expand_binop. Perform a double word multiplication of
826 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
827 as the target's word_mode. This function returns NULL_RTX if anything
828 goes wrong, in which case it may have already emitted instructions
829 which need to be deleted.
831 If we want to multiply two two-word values and have normal and widening
832 multiplies of single-word values, we can do this with three smaller
833 multiplications. Note that we do not make a REG_NO_CONFLICT block here
834 because we are not operating on one word at a time.
836 The multiplication proceeds as follows:
837                              _______________________
838                             [__op0_high_|__op0_low__]
839                              _______________________
840                           * [__op1_high_|__op1_low__]
841       _______________________________________________
842                              _______________________
843       (1)                   [__op0_low__*__op1_low__]
844                  _______________________
845       (2a)      [__op0_low__*__op1_high_]
846                  _______________________
847       (2b)      [__op0_high_*__op1_low__]
848        _______________________
849       (3)      [__op0_high_*__op1_high_]
852 This gives a 4-word result. Since we are only interested in the
853 lower 2 words, partial result (3) and the upper words of (2a) and
854 (2b) don't need to be calculated. Hence (2a) and (2b) can be
855 calculated using non-widening multiplication.
857 (1), however, needs to be calculated with an unsigned widening
858 multiplication. If this operation is not directly supported we
859 try using a signed widening multiplication and adjust the result.
860 This adjustment works as follows:
862 If both operands are positive then no adjustment is needed.
864 If the operands have different signs, for example op0_low < 0 and
865 op1_low >= 0, the instruction treats the most significant bit of
866 op0_low as a sign bit instead of a bit with significance
867 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
868 with 2**BITS_PER_WORD - op0_low, and two's complements the
869 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
870 the result.
872 Similarly, if both operands are negative, we need to add
873 (op0_low + op1_low) * 2**BITS_PER_WORD.
875 We use a trick to adjust quickly. We logically shift op0_low right
876 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
877 op0_high (op1_high) before it is used to calculate 2b (2a). If no
878 logical shift exists, we do an arithmetic right shift and subtract
879 the 0 or -1. */
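/* Worked example of that adjustment (editorial addition): with 8-bit
   words for brevity, let op0_low = 0xF0 and op1_low = 0x10.  A signed
   widening multiply sees op0_low as -16 and yields -256 = 0xFF00,
   whereas partial product (1) should be 240 * 16 = 0x0F00; the
   difference is op1_low * 2**8 = 0x1000.  Adding op0_low >> 7 (= 1)
   to op0_high before forming partial product (2b) feeds exactly that
   missing op1_low * 2**8 back into the final sum.  */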
881 static rtx
882 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
883 bool umulp, enum optab_methods methods)
885 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
886 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
887 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
888 rtx product, adjust, product_high, temp;
890 rtx op0_high = operand_subword_force (op0, high, mode);
891 rtx op0_low = operand_subword_force (op0, low, mode);
892 rtx op1_high = operand_subword_force (op1, high, mode);
893 rtx op1_low = operand_subword_force (op1, low, mode);
895 /* If we're using an unsigned multiply to directly compute the product
896 of the low-order words of the operands and perform any required
897 adjustments of the operands, we begin by trying two more multiplications
898 and then computing the appropriate sum.
900 We have checked above that the required addition is provided.
901 Full-word addition will normally always succeed, especially if
902 it is provided at all, so we don't worry about its failure. The
903 multiplication may well fail, however, so we do handle that. */
905 if (!umulp)
907 /* ??? This could be done with emit_store_flag where available. */
908 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
909 NULL_RTX, 1, methods);
910 if (temp)
911 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
912 NULL_RTX, 0, OPTAB_DIRECT);
913 else
915 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
916 NULL_RTX, 0, methods);
917 if (!temp)
918 return NULL_RTX;
919 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
920 NULL_RTX, 0, OPTAB_DIRECT);
923 if (!op0_high)
924 return NULL_RTX;
927 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
928 NULL_RTX, 0, OPTAB_DIRECT);
929 if (!adjust)
930 return NULL_RTX;
932 /* OP0_HIGH should now be dead. */
934 if (!umulp)
936 /* ??? This could be done with emit_store_flag where available. */
937 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
938 NULL_RTX, 1, methods);
939 if (temp)
940 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
941 NULL_RTX, 0, OPTAB_DIRECT);
942 else
944 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
945 NULL_RTX, 0, methods);
946 if (!temp)
947 return NULL_RTX;
948 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
949 NULL_RTX, 0, OPTAB_DIRECT);
952 if (!op1_high)
953 return NULL_RTX;
956 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
957 NULL_RTX, 0, OPTAB_DIRECT);
958 if (!temp)
959 return NULL_RTX;
961 /* OP1_HIGH should now be dead. */
963 adjust = expand_binop (word_mode, add_optab, adjust, temp,
964 adjust, 0, OPTAB_DIRECT);
966 if (target && !REG_P (target))
967 target = NULL_RTX;
969 if (umulp)
970 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
971 target, 1, OPTAB_DIRECT);
972 else
973 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
974 target, 1, OPTAB_DIRECT);
976 if (!product)
977 return NULL_RTX;
979 product_high = operand_subword (product, high, 1, mode);
980 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
981 REG_P (product_high) ? product_high : adjust,
982 0, OPTAB_DIRECT);
983 emit_move_insn (product_high, adjust);
984 return product;
987 /* Wrapper around expand_binop which takes an rtx code to specify
988 the operation to perform, not an optab pointer. All other
989 arguments are the same. */
991 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
992 rtx op1, rtx target, int unsignedp,
993 enum optab_methods methods)
995 optab binop = code_to_optab[(int) code];
996 gcc_assert (binop);
998 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
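/* Illustrative call (editorial addition): code that has an rtx code
   in hand rather than an optab can simply write, e.g.,

     expand_simple_binop (SImode, PLUS, a, b, NULL_RTX, 0,
                          OPTAB_LIB_WIDEN);

   which maps PLUS through code_to_optab (normally add_optab) and then
   defers to expand_binop.  */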
1001 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1002 binop. Order them according to commutative_operand_precedence and, if
1003 possible, try to put TARGET or a pseudo first. */
1004 static bool
1005 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1007 int op0_prec = commutative_operand_precedence (op0);
1008 int op1_prec = commutative_operand_precedence (op1);
1010 if (op0_prec < op1_prec)
1011 return true;
1013 if (op0_prec > op1_prec)
1014 return false;
1016 /* With equal precedence, both orders are ok, but it is better if the
1017 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1018 if (target == 0 || REG_P (target))
1019 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1020 else
1021 return rtx_equal_p (op1, target);
1025 /* Generate code to perform an operation specified by BINOPTAB
1026 on operands OP0 and OP1, with result having machine-mode MODE.
1028 UNSIGNEDP is for the case where we have to widen the operands
1029 to perform the operation. It says to use zero-extension.
1031 If TARGET is nonzero, the value
1032 is generated there, if it is convenient to do so.
1033 In all cases an rtx is returned for the locus of the value;
1034 this may or may not be TARGET. */
1037 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1038 rtx target, int unsignedp, enum optab_methods methods)
1040 enum optab_methods next_methods
1041 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1042 ? OPTAB_WIDEN : methods);
1043 enum mode_class class;
1044 enum machine_mode wider_mode;
1045 rtx temp;
1046 int commutative_op = 0;
1047 int shift_op = (binoptab->code == ASHIFT
1048 || binoptab->code == ASHIFTRT
1049 || binoptab->code == LSHIFTRT
1050 || binoptab->code == ROTATE
1051 || binoptab->code == ROTATERT);
1052 rtx entry_last = get_last_insn ();
1053 rtx last;
1054 bool first_pass_p = true;
1056 class = GET_MODE_CLASS (mode);
1058 /* If subtracting an integer constant, convert this into an addition of
1059 the negated constant. */
1061 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1063 op1 = negate_rtx (mode, op1);
1064 binoptab = add_optab;
1067 /* If we are inside an appropriately-short loop and we are optimizing,
1068 force expensive constants into a register. */
1069 if (CONSTANT_P (op0) && optimize
1070 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1072 if (GET_MODE (op0) != VOIDmode)
1073 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1074 op0 = force_reg (mode, op0);
1077 if (CONSTANT_P (op1) && optimize
1078 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1080 if (GET_MODE (op1) != VOIDmode)
1081 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1082 op1 = force_reg (mode, op1);
1085 /* Record where to delete back to if we backtrack. */
1086 last = get_last_insn ();
1088 /* If operation is commutative,
1089 try to make the first operand a register.
1090 Even better, try to make it the same as the target.
1091 Also try to make the last operand a constant. */
1092 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1093 || binoptab == smul_widen_optab
1094 || binoptab == umul_widen_optab
1095 || binoptab == smul_highpart_optab
1096 || binoptab == umul_highpart_optab)
1098 commutative_op = 1;
1100 if (swap_commutative_operands_with_target (target, op0, op1))
1102 temp = op1;
1103 op1 = op0;
1104 op0 = temp;
1108 retry:
1110 /* If we can do it with a three-operand insn, do so. */
1112 if (methods != OPTAB_MUST_WIDEN
1113 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1115 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1116 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1117 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1118 rtx pat;
1119 rtx xop0 = op0, xop1 = op1;
1121 if (target)
1122 temp = target;
1123 else
1124 temp = gen_reg_rtx (mode);
1126 /* If it is a commutative operator and the modes would match
1127 if we would swap the operands, we can save the conversions. */
1128 if (commutative_op)
1130 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1131 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1133 rtx tmp;
1135 tmp = op0; op0 = op1; op1 = tmp;
1136 tmp = xop0; xop0 = xop1; xop1 = tmp;
1140 /* In case the insn wants input operands in modes different from
1141 those of the actual operands, convert the operands. It would
1142 seem that we don't need to convert CONST_INTs, but we do, so
1143 that they're properly zero-extended, sign-extended or truncated
1144 for their mode. */
1146 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1147 xop0 = convert_modes (mode0,
1148 GET_MODE (op0) != VOIDmode
1149 ? GET_MODE (op0)
1150 : mode,
1151 xop0, unsignedp);
1153 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1154 xop1 = convert_modes (mode1,
1155 GET_MODE (op1) != VOIDmode
1156 ? GET_MODE (op1)
1157 : mode,
1158 xop1, unsignedp);
1160 /* Now, if insn's predicates don't allow our operands, put them into
1161 pseudo regs. */
1163 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1164 && mode0 != VOIDmode)
1165 xop0 = copy_to_mode_reg (mode0, xop0);
1167 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1168 && mode1 != VOIDmode)
1169 xop1 = copy_to_mode_reg (mode1, xop1);
1171 if (!insn_data[icode].operand[0].predicate (temp, mode))
1172 temp = gen_reg_rtx (mode);
1174 pat = GEN_FCN (icode) (temp, xop0, xop1);
1175 if (pat)
1177 /* If PAT is composed of more than one insn, try to add an appropriate
1178 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1179 operand, call ourselves again, this time without a target. */
1180 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1181 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1183 delete_insns_since (last);
1184 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1185 unsignedp, methods);
1188 emit_insn (pat);
1189 return temp;
1191 else
1192 delete_insns_since (last);
1195 /* If we were trying to rotate by a constant value, and that didn't
1196 work, try rotating the other direction before falling back to
1197 shifts and bitwise-or. */
1198 if (first_pass_p
1199 && (binoptab == rotl_optab || binoptab == rotr_optab)
1200 && class == MODE_INT
1201 && GET_CODE (op1) == CONST_INT
1202 && INTVAL (op1) > 0
1203 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1205 first_pass_p = false;
1206 op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
1207 binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
1208 goto retry;
1211 /* If this is a multiply, see if we can do a widening operation that
1212 takes operands of this mode and makes a wider mode. */
1214 if (binoptab == smul_optab
1215 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1216 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1217 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1218 != CODE_FOR_nothing))
1220 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1221 unsignedp ? umul_widen_optab : smul_widen_optab,
1222 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1224 if (temp != 0)
1226 if (GET_MODE_CLASS (mode) == MODE_INT
1227 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1228 GET_MODE_BITSIZE (GET_MODE (temp))))
1229 return gen_lowpart (mode, temp);
1230 else
1231 return convert_to_mode (mode, temp, unsignedp);
1235 /* Look for a wider mode of the same class for which we think we
1236 can open-code the operation. Check for a widening multiply at the
1237 wider mode as well. */
1239 if (CLASS_HAS_WIDER_MODES_P (class)
1240 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1241 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1242 wider_mode != VOIDmode;
1243 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1245 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1246 || (binoptab == smul_optab
1247 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1248 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1249 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1250 != CODE_FOR_nothing)))
1252 rtx xop0 = op0, xop1 = op1;
1253 int no_extend = 0;
1255 /* For certain integer operations, we need not actually extend
1256 the narrow operands, as long as we will truncate
1257 the results to the same narrowness. */
1259 if ((binoptab == ior_optab || binoptab == and_optab
1260 || binoptab == xor_optab
1261 || binoptab == add_optab || binoptab == sub_optab
1262 || binoptab == smul_optab || binoptab == ashl_optab)
1263 && class == MODE_INT)
1264 no_extend = 1;
1266 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1268 /* The second operand of a shift must always be extended. */
1269 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1270 no_extend && binoptab != ashl_optab);
1272 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1273 unsignedp, OPTAB_DIRECT);
1274 if (temp)
1276 if (class != MODE_INT
1277 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1278 GET_MODE_BITSIZE (wider_mode)))
1280 if (target == 0)
1281 target = gen_reg_rtx (mode);
1282 convert_move (target, temp, 0);
1283 return target;
1285 else
1286 return gen_lowpart (mode, temp);
1288 else
1289 delete_insns_since (last);
1293 /* These can be done a word at a time. */
1294 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1295 && class == MODE_INT
1296 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1297 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1299 int i;
1300 rtx insns;
1301 rtx equiv_value;
1303 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1304 won't be accurate, so use a new target. */
1305 if (target == 0 || target == op0 || target == op1)
1306 target = gen_reg_rtx (mode);
1308 start_sequence ();
1310 /* Do the actual arithmetic. */
1311 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1313 rtx target_piece = operand_subword (target, i, 1, mode);
1314 rtx x = expand_binop (word_mode, binoptab,
1315 operand_subword_force (op0, i, mode),
1316 operand_subword_force (op1, i, mode),
1317 target_piece, unsignedp, next_methods);
1319 if (x == 0)
1320 break;
1322 if (target_piece != x)
1323 emit_move_insn (target_piece, x);
1326 insns = get_insns ();
1327 end_sequence ();
1329 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1331 if (binoptab->code != UNKNOWN)
1332 equiv_value
1333 = gen_rtx_fmt_ee (binoptab->code, mode,
1334 copy_rtx (op0), copy_rtx (op1));
1335 else
1336 equiv_value = 0;
1338 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1339 return target;
1343 /* Synthesize double word shifts from single word shifts. */
1344 if ((binoptab == lshr_optab || binoptab == ashl_optab
1345 || binoptab == ashr_optab)
1346 && class == MODE_INT
1347 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1348 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1349 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1350 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1351 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1353 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1354 enum machine_mode op1_mode;
1356 double_shift_mask = targetm.shift_truncation_mask (mode);
1357 shift_mask = targetm.shift_truncation_mask (word_mode);
1358 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1360 /* Apply the truncation to constant shifts. */
1361 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1362 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1364 if (op1 == CONST0_RTX (op1_mode))
1365 return op0;
1367 /* Make sure that this is a combination that expand_doubleword_shift
1368 can handle. See the comments there for details. */
1369 if (double_shift_mask == 0
1370 || (shift_mask == BITS_PER_WORD - 1
1371 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1373 rtx insns, equiv_value;
1374 rtx into_target, outof_target;
1375 rtx into_input, outof_input;
1376 int left_shift, outof_word;
1378 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1379 won't be accurate, so use a new target. */
1380 if (target == 0 || target == op0 || target == op1)
1381 target = gen_reg_rtx (mode);
1383 start_sequence ();
1385 /* OUTOF_* is the word we are shifting bits away from, and
1386 INTO_* is the word that we are shifting bits towards, thus
1387 they differ depending on the direction of the shift and
1388 WORDS_BIG_ENDIAN. */
1390 left_shift = binoptab == ashl_optab;
1391 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1393 outof_target = operand_subword (target, outof_word, 1, mode);
1394 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1396 outof_input = operand_subword_force (op0, outof_word, mode);
1397 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1399 if (expand_doubleword_shift (op1_mode, binoptab,
1400 outof_input, into_input, op1,
1401 outof_target, into_target,
1402 unsignedp, methods, shift_mask))
1404 insns = get_insns ();
1405 end_sequence ();
1407 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1408 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1409 return target;
1411 end_sequence ();
1415 /* Synthesize double word rotates from single word shifts. */
1416 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1417 && class == MODE_INT
1418 && GET_CODE (op1) == CONST_INT
1419 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1420 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1421 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1423 rtx insns, equiv_value;
1424 rtx into_target, outof_target;
1425 rtx into_input, outof_input;
1426 rtx inter;
1427 int shift_count, left_shift, outof_word;
1429 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1430 won't be accurate, so use a new target. Do this also if target is not
1431 a REG, first because having a register instead may open optimization
1432 opportunities, and second because if target and op0 happen to be MEMs
1433 designating the same location, we would risk clobbering it too early
1434 in the code sequence we generate below. */
1435 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1436 target = gen_reg_rtx (mode);
1438 start_sequence ();
1440 shift_count = INTVAL (op1);
1442 /* OUTOF_* is the word we are shifting bits away from, and
1443 INTO_* is the word that we are shifting bits towards, thus
1444 they differ depending on the direction of the shift and
1445 WORDS_BIG_ENDIAN. */
1447 left_shift = (binoptab == rotl_optab);
1448 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1450 outof_target = operand_subword (target, outof_word, 1, mode);
1451 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1453 outof_input = operand_subword_force (op0, outof_word, mode);
1454 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1456 if (shift_count == BITS_PER_WORD)
1458 /* This is just a word swap. */
1459 emit_move_insn (outof_target, into_input);
1460 emit_move_insn (into_target, outof_input);
1461 inter = const0_rtx;
1463 else
1465 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1466 rtx first_shift_count, second_shift_count;
1467 optab reverse_unsigned_shift, unsigned_shift;
1469 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1470 ? lshr_optab : ashl_optab);
1472 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1473 ? ashl_optab : lshr_optab);
1475 if (shift_count > BITS_PER_WORD)
1477 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1478 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1480 else
1482 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1483 second_shift_count = GEN_INT (shift_count);
1486 into_temp1 = expand_binop (word_mode, unsigned_shift,
1487 outof_input, first_shift_count,
1488 NULL_RTX, unsignedp, next_methods);
1489 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1490 into_input, second_shift_count,
1491 NULL_RTX, unsignedp, next_methods);
1493 if (into_temp1 != 0 && into_temp2 != 0)
1494 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1495 into_target, unsignedp, next_methods);
1496 else
1497 inter = 0;
1499 if (inter != 0 && inter != into_target)
1500 emit_move_insn (into_target, inter);
1502 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1503 into_input, first_shift_count,
1504 NULL_RTX, unsignedp, next_methods);
1505 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1506 outof_input, second_shift_count,
1507 NULL_RTX, unsignedp, next_methods);
1509 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1510 inter = expand_binop (word_mode, ior_optab,
1511 outof_temp1, outof_temp2,
1512 outof_target, unsignedp, next_methods);
1514 if (inter != 0 && inter != outof_target)
1515 emit_move_insn (outof_target, inter);
1518 insns = get_insns ();
1519 end_sequence ();
1521 if (inter != 0)
1523 if (binoptab->code != UNKNOWN)
1524 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1525 else
1526 equiv_value = 0;
1528 /* We can't make this a no conflict block if this is a word swap,
1529 because the word swap case fails if the input and output values
1530 are in the same register. */
1531 if (shift_count != BITS_PER_WORD)
1532 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1533 else
1534 emit_insn (insns);
1537 return target;
1541 /* These can be done a word at a time by propagating carries. */
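/* Illustrative sketch (editorial addition): for addition, the carry
   out of each word piece is detected with an unsigned comparison,
   essentially

     x = a + b;  carry = ((unsigned) x < (unsigned) a);

   which is what the emit_store_flag_force calls below compute with
   LT, while GT detects the borrow when subtracting.  */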
1542 if ((binoptab == add_optab || binoptab == sub_optab)
1543 && class == MODE_INT
1544 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1545 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1547 unsigned int i;
1548 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1549 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1550 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1551 rtx xop0, xop1, xtarget;
1553 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG_VALUE
1554 is one of those, use it. Otherwise, use 1 since it is the
1555 one easiest to get. */
1556 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1557 int normalizep = STORE_FLAG_VALUE;
1558 #else
1559 int normalizep = 1;
1560 #endif
1562 /* Prepare the operands. */
1563 xop0 = force_reg (mode, op0);
1564 xop1 = force_reg (mode, op1);
1566 xtarget = gen_reg_rtx (mode);
1568 if (target == 0 || !REG_P (target))
1569 target = xtarget;
1571 /* Indicate for flow that the entire target reg is being set. */
1572 if (REG_P (target))
1573 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1575 /* Do the actual arithmetic. */
1576 for (i = 0; i < nwords; i++)
1578 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1579 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1580 rtx op0_piece = operand_subword_force (xop0, index, mode);
1581 rtx op1_piece = operand_subword_force (xop1, index, mode);
1582 rtx x;
1584 /* Main add/subtract of the input operands. */
1585 x = expand_binop (word_mode, binoptab,
1586 op0_piece, op1_piece,
1587 target_piece, unsignedp, next_methods);
1588 if (x == 0)
1589 break;
1591 if (i + 1 < nwords)
1593 /* Store carry from main add/subtract. */
1594 carry_out = gen_reg_rtx (word_mode);
1595 carry_out = emit_store_flag_force (carry_out,
1596 (binoptab == add_optab
1597 ? LT : GT),
1598 x, op0_piece,
1599 word_mode, 1, normalizep);
1602 if (i > 0)
1604 rtx newx;
1606 /* Add/subtract previous carry to main result. */
1607 newx = expand_binop (word_mode,
1608 normalizep == 1 ? binoptab : otheroptab,
1609 x, carry_in,
1610 NULL_RTX, 1, next_methods);
1612 if (i + 1 < nwords)
1614 /* Get out carry from adding/subtracting carry in. */
1615 rtx carry_tmp = gen_reg_rtx (word_mode);
1616 carry_tmp = emit_store_flag_force (carry_tmp,
1617 (binoptab == add_optab
1618 ? LT : GT),
1619 newx, x,
1620 word_mode, 1, normalizep);
1622 /* Logical-ior the two poss. carry together. */
1623 carry_out = expand_binop (word_mode, ior_optab,
1624 carry_out, carry_tmp,
1625 carry_out, 0, next_methods);
1626 if (carry_out == 0)
1627 break;
1629 emit_move_insn (target_piece, newx);
1631 else
1633 if (x != target_piece)
1634 emit_move_insn (target_piece, x);
1637 carry_in = carry_out;
1640 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1642 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1643 || ! rtx_equal_p (target, xtarget))
1645 rtx temp = emit_move_insn (target, xtarget);
1647 set_unique_reg_note (temp,
1648 REG_EQUAL,
1649 gen_rtx_fmt_ee (binoptab->code, mode,
1650 copy_rtx (xop0),
1651 copy_rtx (xop1)));
1653 else
1654 target = xtarget;
1656 return target;
1659 else
1660 delete_insns_since (last);
1663 /* Attempt to synthesize double word multiplies using a sequence of word
1664 mode multiplications. We first attempt to generate a sequence using a
1665 more efficient unsigned widening multiply, and if that fails we then
1666 try using a signed widening multiply. */
1668 if (binoptab == smul_optab
1669 && class == MODE_INT
1670 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1671 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1672 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1674 rtx product = NULL_RTX;
1676 if (umul_widen_optab->handlers[(int) mode].insn_code
1677 != CODE_FOR_nothing)
1679 product = expand_doubleword_mult (mode, op0, op1, target,
1680 true, methods);
1681 if (!product)
1682 delete_insns_since (last);
1685 if (product == NULL_RTX
1686 && smul_widen_optab->handlers[(int) mode].insn_code
1687 != CODE_FOR_nothing)
1689 product = expand_doubleword_mult (mode, op0, op1, target,
1690 false, methods);
1691 if (!product)
1692 delete_insns_since (last);
1695 if (product != NULL_RTX)
1697 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1699 temp = emit_move_insn (target ? target : product, product);
1700 set_unique_reg_note (temp,
1701 REG_EQUAL,
1702 gen_rtx_fmt_ee (MULT, mode,
1703 copy_rtx (op0),
1704 copy_rtx (op1)));
1706 return product;
1710 /* It can't be open-coded in this mode.
1711 Use a library call if one is available and caller says that's ok. */
1713 if (binoptab->handlers[(int) mode].libfunc
1714 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1716 rtx insns;
1717 rtx op1x = op1;
1718 enum machine_mode op1_mode = mode;
1719 rtx value;
1721 start_sequence ();
1723 if (shift_op)
1725 op1_mode = word_mode;
1726 /* Specify unsigned here,
1727 since negative shift counts are meaningless. */
1728 op1x = convert_to_mode (word_mode, op1, 1);
1731 if (GET_MODE (op0) != VOIDmode
1732 && GET_MODE (op0) != mode)
1733 op0 = convert_to_mode (mode, op0, unsignedp);
1735 /* Pass 1 for NO_QUEUE so we don't lose any increments
1736 if the libcall is cse'd or moved. */
1737 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1738 NULL_RTX, LCT_CONST, mode, 2,
1739 op0, mode, op1x, op1_mode);
1741 insns = get_insns ();
1742 end_sequence ();
1744 target = gen_reg_rtx (mode);
1745 emit_libcall_block (insns, target, value,
1746 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1748 return target;
1751 delete_insns_since (last);
1753 /* It can't be done in this mode. Can we do it in a wider mode? */
1755 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1756 || methods == OPTAB_MUST_WIDEN))
1758 /* Caller says, don't even try. */
1759 delete_insns_since (entry_last);
1760 return 0;
1763 /* Compute the value of METHODS to pass to recursive calls.
1764 Don't allow widening to be tried recursively. */
1766 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1768 /* Look for a wider mode of the same class for which it appears we can do
1769 the operation. */
1771 if (CLASS_HAS_WIDER_MODES_P (class))
1773 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1774 wider_mode != VOIDmode;
1775 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1777 if ((binoptab->handlers[(int) wider_mode].insn_code
1778 != CODE_FOR_nothing)
1779 || (methods == OPTAB_LIB
1780 && binoptab->handlers[(int) wider_mode].libfunc))
1782 rtx xop0 = op0, xop1 = op1;
1783 int no_extend = 0;
1785 /* For certain integer operations, we need not actually extend
1786 the narrow operands, as long as we will truncate
1787 the results to the same narrowness. */
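/* Editorial example of the remark above: adding two QImode values in
   SImode produces the correct low 8 bits no matter what the upper 24 bits
   of either operand contain, so no real zero- or sign-extension insn is
   needed; an operation such as division does depend on the high bits,
   which is why it is not in the list below.  */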
1789 if ((binoptab == ior_optab || binoptab == and_optab
1790 || binoptab == xor_optab
1791 || binoptab == add_optab || binoptab == sub_optab
1792 || binoptab == smul_optab || binoptab == ashl_optab)
1793 && class == MODE_INT)
1794 no_extend = 1;
1796 xop0 = widen_operand (xop0, wider_mode, mode,
1797 unsignedp, no_extend);
1799 /* The second operand of a shift must always be extended. */
1800 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1801 no_extend && binoptab != ashl_optab);
1803 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1804 unsignedp, methods);
1805 if (temp)
1807 if (class != MODE_INT
1808 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1809 GET_MODE_BITSIZE (wider_mode)))
1811 if (target == 0)
1812 target = gen_reg_rtx (mode);
1813 convert_move (target, temp, 0);
1814 return target;
1816 else
1817 return gen_lowpart (mode, temp);
1819 else
1820 delete_insns_since (last);
1825 delete_insns_since (entry_last);
1826 return 0;
1829 /* Expand a binary operator which has both signed and unsigned forms.
1830 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1831 signed operations.
1833 If we widen unsigned operands, we may use a signed wider operation instead
1834 of an unsigned wider operation, since the result would be the same. */
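/* Editorial example of the observation above: two 16-bit unsigned values
   that have been zero-extended to 32 bits are both non-negative as signed
   32-bit numbers, so a signed 32-bit divide (or compare, etc.) gives the
   same answer an unsigned one would; hence the fake signed optab built
   below.  */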
1837 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
1838 rtx op0, rtx op1, rtx target, int unsignedp,
1839 enum optab_methods methods)
1841 rtx temp;
1842 optab direct_optab = unsignedp ? uoptab : soptab;
1843 struct optab wide_soptab;
1845 /* Do it without widening, if possible. */
1846 temp = expand_binop (mode, direct_optab, op0, op1, target,
1847 unsignedp, OPTAB_DIRECT);
1848 if (temp || methods == OPTAB_DIRECT)
1849 return temp;
1851 /* Try widening to a signed int. Make a fake signed optab that
1852 hides any signed insn for direct use. */
1853 wide_soptab = *soptab;
1854 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
1855 wide_soptab.handlers[(int) mode].libfunc = 0;
1857 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1858 unsignedp, OPTAB_WIDEN);
1860 /* For unsigned operands, try widening to an unsigned int. */
1861 if (temp == 0 && unsignedp)
1862 temp = expand_binop (mode, uoptab, op0, op1, target,
1863 unsignedp, OPTAB_WIDEN);
1864 if (temp || methods == OPTAB_WIDEN)
1865 return temp;
1867 /* Use the right width lib call if that exists. */
1868 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
1869 if (temp || methods == OPTAB_LIB)
1870 return temp;
1872 /* Must widen and use a lib call, use either signed or unsigned. */
1873 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1874 unsignedp, methods);
1875 if (temp != 0)
1876 return temp;
1877 if (unsignedp)
1878 return expand_binop (mode, uoptab, op0, op1, target,
1879 unsignedp, methods);
1880 return 0;
1883 /* Generate code to perform an operation specified by UNOPPTAB
1884 on operand OP0, with two results to TARG0 and TARG1.
1885 We assume that the order of the operands for the instruction
1886 is TARG0, TARG1, OP0.
1888 Either TARG0 or TARG1 may be zero, but what that means is that
1889 the result is not actually wanted. We will generate it into
1890 a dummy pseudo-reg and discard it. They may not both be zero.
1892 Returns 1 if this operation can be performed; 0 if not. */
1895 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
1896 int unsignedp)
1898 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1899 enum mode_class class;
1900 enum machine_mode wider_mode;
1901 rtx entry_last = get_last_insn ();
1902 rtx last;
1904 class = GET_MODE_CLASS (mode);
1906 if (!targ0)
1907 targ0 = gen_reg_rtx (mode);
1908 if (!targ1)
1909 targ1 = gen_reg_rtx (mode);
1911 /* Record where to go back to if we fail. */
1912 last = get_last_insn ();
1914 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1916 int icode = (int) unoptab->handlers[(int) mode].insn_code;
1917 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
1918 rtx pat;
1919 rtx xop0 = op0;
1921 if (GET_MODE (xop0) != VOIDmode
1922 && GET_MODE (xop0) != mode0)
1923 xop0 = convert_to_mode (mode0, xop0, unsignedp);
1925 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1926 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
1927 xop0 = copy_to_mode_reg (mode0, xop0);
1929 /* We could handle this, but we should always be called with a pseudo
1930 for our targets and all insns should take them as outputs. */
1931 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
1932 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
1934 pat = GEN_FCN (icode) (targ0, targ1, xop0);
1935 if (pat)
1937 emit_insn (pat);
1938 return 1;
1940 else
1941 delete_insns_since (last);
1944 /* It can't be done in this mode. Can we do it in a wider mode? */
1946 if (CLASS_HAS_WIDER_MODES_P (class))
1948 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1949 wider_mode != VOIDmode;
1950 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1952 if (unoptab->handlers[(int) wider_mode].insn_code
1953 != CODE_FOR_nothing)
1955 rtx t0 = gen_reg_rtx (wider_mode);
1956 rtx t1 = gen_reg_rtx (wider_mode);
1957 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1959 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1961 convert_move (targ0, t0, unsignedp);
1962 convert_move (targ1, t1, unsignedp);
1963 return 1;
1965 else
1966 delete_insns_since (last);
1971 delete_insns_since (entry_last);
1972 return 0;
1975 /* Generate code to perform an operation specified by BINOPTAB
1976 on operands OP0 and OP1, with two results to TARG0 and TARG1.
1977 We assume that the order of the operands for the instruction
1978 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1979 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1981 Either TARG0 or TARG1 may be zero, but what that means is that
1982 the result is not actually wanted. We will generate it into
1983 a dummy pseudo-reg and discard it. They may not both be zero.
1985 Returns 1 if this operation can be performed; 0 if not. */
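/* Editorial example of such a pattern: a divmod-style instruction that
   computes quotient and remainder at once would match
   [(set TARG0 (div OP0 OP1)) (set TARG1 (mod OP0 OP1))],
   with the quotient going to TARG0 and the remainder to TARG1.  */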
1988 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
1989 int unsignedp)
1991 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1992 enum mode_class class;
1993 enum machine_mode wider_mode;
1994 rtx entry_last = get_last_insn ();
1995 rtx last;
1997 class = GET_MODE_CLASS (mode);
1999 /* If we are inside an appropriately-short loop and we are optimizing,
2000 force expensive constants into a register. */
2001 if (CONSTANT_P (op0) && optimize
2002 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2003 op0 = force_reg (mode, op0);
2005 if (CONSTANT_P (op1) && optimize
2006 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2007 op1 = force_reg (mode, op1);
2009 if (!targ0)
2010 targ0 = gen_reg_rtx (mode);
2011 if (!targ1)
2012 targ1 = gen_reg_rtx (mode);
2014 /* Record where to go back to if we fail. */
2015 last = get_last_insn ();
2017 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2019 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2020 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2021 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2022 rtx pat;
2023 rtx xop0 = op0, xop1 = op1;
2025 /* In case the insn wants input operands in modes different from
2026 those of the actual operands, convert the operands. It would
2027 seem that we don't need to convert CONST_INTs, but we do, so
2028 that they're properly zero-extended, sign-extended or truncated
2029 for their mode. */
2031 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2032 xop0 = convert_modes (mode0,
2033 GET_MODE (op0) != VOIDmode
2034 ? GET_MODE (op0)
2035 : mode,
2036 xop0, unsignedp);
2038 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2039 xop1 = convert_modes (mode1,
2040 GET_MODE (op1) != VOIDmode
2041 ? GET_MODE (op1)
2042 : mode,
2043 xop1, unsignedp);
2045 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2046 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2047 xop0 = copy_to_mode_reg (mode0, xop0);
2049 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2050 xop1 = copy_to_mode_reg (mode1, xop1);
2052 /* We could handle this, but we should always be called with a pseudo
2053 for our targets and all insns should take them as outputs. */
2054 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2055 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2057 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2058 if (pat)
2060 emit_insn (pat);
2061 return 1;
2063 else
2064 delete_insns_since (last);
2067 /* It can't be done in this mode. Can we do it in a wider mode? */
2069 if (CLASS_HAS_WIDER_MODES_P (class))
2071 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2072 wider_mode != VOIDmode;
2073 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2075 if (binoptab->handlers[(int) wider_mode].insn_code
2076 != CODE_FOR_nothing)
2078 rtx t0 = gen_reg_rtx (wider_mode);
2079 rtx t1 = gen_reg_rtx (wider_mode);
2080 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2081 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2083 if (expand_twoval_binop (binoptab, cop0, cop1,
2084 t0, t1, unsignedp))
2086 convert_move (targ0, t0, unsignedp);
2087 convert_move (targ1, t1, unsignedp);
2088 return 1;
2090 else
2091 delete_insns_since (last);
2096 delete_insns_since (entry_last);
2097 return 0;
2100 /* Expand the two-valued library call indicated by BINOPTAB, but
2101 preserve only one of the values. If TARG0 is non-NULL, the first
2102 value is placed into TARG0; otherwise the second value is placed
2103 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2104 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2105 This routine assumes that the value returned by the library call is
2106 as if the return value was of an integral mode twice as wide as the
2107 mode of OP0. Returns 1 if the call was successful. */
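/* Editorial illustration, assuming an SImode divmod-style libcall: the
   routine returns one DImode value; the SImode subword at byte offset 0
   carries the first result and the subword at byte offset
   GET_MODE_SIZE (SImode) carries the second, which is how the
   simplify_gen_subreg call below selects the half for TARG0 or TARG1.  */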
2109 bool
2110 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2111 rtx targ0, rtx targ1, enum rtx_code code)
2113 enum machine_mode mode;
2114 enum machine_mode libval_mode;
2115 rtx libval;
2116 rtx insns;
2118 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2119 gcc_assert (!targ0 != !targ1);
2121 mode = GET_MODE (op0);
2122 if (!binoptab->handlers[(int) mode].libfunc)
2123 return false;
2125 /* The value returned by the library function will have twice as
2126 many bits as the nominal MODE. */
2127 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2128 MODE_INT);
2129 start_sequence ();
2130 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2131 NULL_RTX, LCT_CONST,
2132 libval_mode, 2,
2133 op0, mode,
2134 op1, mode);
2135 /* Get the part of VAL containing the value that we want. */
2136 libval = simplify_gen_subreg (mode, libval, libval_mode,
2137 targ0 ? 0 : GET_MODE_SIZE (mode));
2138 insns = get_insns ();
2139 end_sequence ();
2140 /* Move the result into the desired location. */
2141 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2142 gen_rtx_fmt_ee (code, mode, op0, op1));
2144 return true;
2148 /* Wrapper around expand_unop which takes an rtx code to specify
2149 the operation to perform, not an optab pointer. All other
2150 arguments are the same. */
2152 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2153 rtx target, int unsignedp)
2155 optab unop = code_to_optab[(int) code];
2156 gcc_assert (unop);
2158 return expand_unop (mode, unop, op0, target, unsignedp);
2161 /* Try calculating
2162 (clz:narrow x)
2163 as
2164 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
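/* Editorial worked example: clz of a QImode value can be computed in
   SImode by zero-extending to 32 bits, taking the SImode clz, and
   subtracting 32 - 8 = 24, the number of leading zeros contributed by
   the extension itself.  */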
2165 static rtx
2166 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2168 enum mode_class class = GET_MODE_CLASS (mode);
2169 if (CLASS_HAS_WIDER_MODES_P (class))
2171 enum machine_mode wider_mode;
2172 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2173 wider_mode != VOIDmode;
2174 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2176 if (clz_optab->handlers[(int) wider_mode].insn_code
2177 != CODE_FOR_nothing)
2179 rtx xop0, temp, last;
2181 last = get_last_insn ();
2183 if (target == 0)
2184 target = gen_reg_rtx (mode);
2185 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2186 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2187 if (temp != 0)
2188 temp = expand_binop (wider_mode, sub_optab, temp,
2189 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2190 - GET_MODE_BITSIZE (mode)),
2191 target, true, OPTAB_DIRECT);
2192 if (temp == 0)
2193 delete_insns_since (last);
2195 return temp;
2199 return 0;
2202 /* Try calculating (parity x) as (and (popcount x) 1), where
2203 popcount can also be done in a wider mode. */
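/* Editorial worked example: for x = 0b1011 the popcount is 3, and
   3 & 1 = 1, i.e. odd parity; masking with 1 simply keeps the low bit
   of the population count, which is the parity.  */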
2204 static rtx
2205 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2207 enum mode_class class = GET_MODE_CLASS (mode);
2208 if (CLASS_HAS_WIDER_MODES_P (class))
2210 enum machine_mode wider_mode;
2211 for (wider_mode = mode; wider_mode != VOIDmode;
2212 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2214 if (popcount_optab->handlers[(int) wider_mode].insn_code
2215 != CODE_FOR_nothing)
2217 rtx xop0, temp, last;
2219 last = get_last_insn ();
2221 if (target == 0)
2222 target = gen_reg_rtx (mode);
2223 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2224 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2225 true);
2226 if (temp != 0)
2227 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2228 target, true, OPTAB_DIRECT);
2229 if (temp == 0)
2230 delete_insns_since (last);
2232 return temp;
2236 return 0;
2239 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2240 conditions, VAL may already be a SUBREG against which we cannot generate
2241 a further SUBREG. In this case, we expect forcing the value into a
2242 register will work around the situation. */
2244 static rtx
2245 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2246 enum machine_mode imode)
2248 rtx ret;
2249 ret = lowpart_subreg (omode, val, imode);
2250 if (ret == NULL)
2252 val = force_reg (imode, val);
2253 ret = lowpart_subreg (omode, val, imode);
2254 gcc_assert (ret != NULL);
2256 return ret;
2259 /* Expand a floating point absolute value or negation operation via a
2260 logical operation on the sign bit. */
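/* Editorial illustration, assuming IEEE single precision with the sign
   in bit 31: abs clears that bit (x & 0x7fffffff) and neg flips it
   (x ^ 0x80000000); the code below derives the equivalent mask from
   fmt->signbit_rw for whatever format MODE actually uses.  */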
2262 static rtx
2263 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2264 rtx op0, rtx target)
2266 const struct real_format *fmt;
2267 int bitpos, word, nwords, i;
2268 enum machine_mode imode;
2269 HOST_WIDE_INT hi, lo;
2270 rtx temp, insns;
2272 /* The format has to have a simple sign bit. */
2273 fmt = REAL_MODE_FORMAT (mode);
2274 if (fmt == NULL)
2275 return NULL_RTX;
2277 bitpos = fmt->signbit_rw;
2278 if (bitpos < 0)
2279 return NULL_RTX;
2281 /* Don't create negative zeros if the format doesn't support them. */
2282 if (code == NEG && !fmt->has_signed_zero)
2283 return NULL_RTX;
2285 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2287 imode = int_mode_for_mode (mode);
2288 if (imode == BLKmode)
2289 return NULL_RTX;
2290 word = 0;
2291 nwords = 1;
2293 else
2295 imode = word_mode;
2297 if (FLOAT_WORDS_BIG_ENDIAN)
2298 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2299 else
2300 word = bitpos / BITS_PER_WORD;
2301 bitpos = bitpos % BITS_PER_WORD;
2302 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2305 if (bitpos < HOST_BITS_PER_WIDE_INT)
2307 hi = 0;
2308 lo = (HOST_WIDE_INT) 1 << bitpos;
2310 else
2312 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2313 lo = 0;
2315 if (code == ABS)
2316 lo = ~lo, hi = ~hi;
2318 if (target == 0 || target == op0)
2319 target = gen_reg_rtx (mode);
2321 if (nwords > 1)
2323 start_sequence ();
2325 for (i = 0; i < nwords; ++i)
2327 rtx targ_piece = operand_subword (target, i, 1, mode);
2328 rtx op0_piece = operand_subword_force (op0, i, mode);
2330 if (i == word)
2332 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2333 op0_piece,
2334 immed_double_const (lo, hi, imode),
2335 targ_piece, 1, OPTAB_LIB_WIDEN);
2336 if (temp != targ_piece)
2337 emit_move_insn (targ_piece, temp);
2339 else
2340 emit_move_insn (targ_piece, op0_piece);
2343 insns = get_insns ();
2344 end_sequence ();
2346 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2347 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2349 else
2351 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2352 gen_lowpart (imode, op0),
2353 immed_double_const (lo, hi, imode),
2354 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2355 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2357 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2358 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2361 return target;
2364 /* Generate code to perform an operation specified by UNOPTAB
2365 on operand OP0, with result having machine-mode MODE.
2367 UNSIGNEDP is for the case where we have to widen the operands
2368 to perform the operation. It says to use zero-extension.
2370 If TARGET is nonzero, the value
2371 is generated there, if it is convenient to do so.
2372 In all cases an rtx is returned for the locus of the value;
2373 this may or may not be TARGET. */
2376 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2377 int unsignedp)
2379 enum mode_class class;
2380 enum machine_mode wider_mode;
2381 rtx temp;
2382 rtx last = get_last_insn ();
2383 rtx pat;
2385 class = GET_MODE_CLASS (mode);
2387 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2389 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2390 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2391 rtx xop0 = op0;
2393 if (target)
2394 temp = target;
2395 else
2396 temp = gen_reg_rtx (mode);
2398 if (GET_MODE (xop0) != VOIDmode
2399 && GET_MODE (xop0) != mode0)
2400 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2402 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2404 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2405 xop0 = copy_to_mode_reg (mode0, xop0);
2407 if (!insn_data[icode].operand[0].predicate (temp, mode))
2408 temp = gen_reg_rtx (mode);
2410 pat = GEN_FCN (icode) (temp, xop0);
2411 if (pat)
2413 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2414 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2416 delete_insns_since (last);
2417 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2420 emit_insn (pat);
2422 return temp;
2424 else
2425 delete_insns_since (last);
2428 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2430 /* Widening clz needs special treatment. */
2431 if (unoptab == clz_optab)
2433 temp = widen_clz (mode, op0, target);
2434 if (temp)
2435 return temp;
2436 else
2437 goto try_libcall;
2440 if (CLASS_HAS_WIDER_MODES_P (class))
2441 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2442 wider_mode != VOIDmode;
2443 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2445 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2447 rtx xop0 = op0;
2449 /* For certain operations, we need not actually extend
2450 the narrow operand, as long as we will truncate the
2451 results to the same narrowness. */
2453 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2454 (unoptab == neg_optab
2455 || unoptab == one_cmpl_optab)
2456 && class == MODE_INT);
2458 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2459 unsignedp);
2461 if (temp)
2463 if (class != MODE_INT
2464 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2465 GET_MODE_BITSIZE (wider_mode)))
2467 if (target == 0)
2468 target = gen_reg_rtx (mode);
2469 convert_move (target, temp, 0);
2470 return target;
2472 else
2473 return gen_lowpart (mode, temp);
2475 else
2476 delete_insns_since (last);
2480 /* These can be done a word at a time. */
2481 if (unoptab == one_cmpl_optab
2482 && class == MODE_INT
2483 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2484 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2486 int i;
2487 rtx insns;
2489 if (target == 0 || target == op0)
2490 target = gen_reg_rtx (mode);
2492 start_sequence ();
2494 /* Do the actual arithmetic. */
2495 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2497 rtx target_piece = operand_subword (target, i, 1, mode);
2498 rtx x = expand_unop (word_mode, unoptab,
2499 operand_subword_force (op0, i, mode),
2500 target_piece, unsignedp);
2502 if (target_piece != x)
2503 emit_move_insn (target_piece, x);
2506 insns = get_insns ();
2507 end_sequence ();
2509 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2510 gen_rtx_fmt_e (unoptab->code, mode,
2511 copy_rtx (op0)));
2512 return target;
2515 if (unoptab->code == NEG)
2517 /* Try negating floating point values by flipping the sign bit. */
2518 if (SCALAR_FLOAT_MODE_P (mode))
2520 temp = expand_absneg_bit (NEG, mode, op0, target);
2521 if (temp)
2522 return temp;
2525 /* If there is no negation pattern, and we have no negative zero,
2526 try subtracting from zero. */
2527 if (!HONOR_SIGNED_ZEROS (mode))
2529 temp = expand_binop (mode, (unoptab == negv_optab
2530 ? subv_optab : sub_optab),
2531 CONST0_RTX (mode), op0, target,
2532 unsignedp, OPTAB_DIRECT);
2533 if (temp)
2534 return temp;
2538 /* Try calculating parity (x) as popcount (x) % 2. */
2539 if (unoptab == parity_optab)
2541 temp = expand_parity (mode, op0, target);
2542 if (temp)
2543 return temp;
2546 try_libcall:
2547 /* Now try a library call in this mode. */
2548 if (unoptab->handlers[(int) mode].libfunc)
2550 rtx insns;
2551 rtx value;
2552 enum machine_mode outmode = mode;
2554 /* All of these functions return small values. Thus we choose to
2555 have them return something that isn't a double-word. */
2556 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2557 || unoptab == popcount_optab || unoptab == parity_optab)
2558 outmode
2559 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2561 start_sequence ();
2563 /* Pass 1 for NO_QUEUE so we don't lose any increments
2564 if the libcall is cse'd or moved. */
2565 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2566 NULL_RTX, LCT_CONST, outmode,
2567 1, op0, mode);
2568 insns = get_insns ();
2569 end_sequence ();
2571 target = gen_reg_rtx (outmode);
2572 emit_libcall_block (insns, target, value,
2573 gen_rtx_fmt_e (unoptab->code, mode, op0));
2575 return target;
2578 /* It can't be done in this mode. Can we do it in a wider mode? */
2580 if (CLASS_HAS_WIDER_MODES_P (class))
2582 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2583 wider_mode != VOIDmode;
2584 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2586 if ((unoptab->handlers[(int) wider_mode].insn_code
2587 != CODE_FOR_nothing)
2588 || unoptab->handlers[(int) wider_mode].libfunc)
2590 rtx xop0 = op0;
2592 /* For certain operations, we need not actually extend
2593 the narrow operand, as long as we will truncate the
2594 results to the same narrowness. */
2596 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2597 (unoptab == neg_optab
2598 || unoptab == one_cmpl_optab)
2599 && class == MODE_INT);
2601 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2602 unsignedp);
2604 /* If we are generating clz using wider mode, adjust the
2605 result. */
2606 if (unoptab == clz_optab && temp != 0)
2607 temp = expand_binop (wider_mode, sub_optab, temp,
2608 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2609 - GET_MODE_BITSIZE (mode)),
2610 target, true, OPTAB_DIRECT);
2612 if (temp)
2614 if (class != MODE_INT)
2616 if (target == 0)
2617 target = gen_reg_rtx (mode);
2618 convert_move (target, temp, 0);
2619 return target;
2621 else
2622 return gen_lowpart (mode, temp);
2624 else
2625 delete_insns_since (last);
2630 /* One final attempt at implementing negation via subtraction,
2631 this time allowing widening of the operand. */
2632 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2634 rtx temp;
2635 temp = expand_binop (mode,
2636 unoptab == negv_optab ? subv_optab : sub_optab,
2637 CONST0_RTX (mode), op0,
2638 target, unsignedp, OPTAB_LIB_WIDEN);
2639 if (temp)
2640 return temp;
2643 return 0;
2646 /* Emit code to compute the absolute value of OP0, with result to
2647 TARGET if convenient. (TARGET may be 0.) The return value says
2648 where the result actually is to be found.
2650 MODE is the mode of the operand; the mode of the result is
2651 different but can be deduced from MODE.
2656 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2657 int result_unsignedp)
2659 rtx temp;
2661 if (! flag_trapv)
2662 result_unsignedp = 1;
2664 /* First try to do it with a special abs instruction. */
2665 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2666 op0, target, 0);
2667 if (temp != 0)
2668 return temp;
2670 /* For floating point modes, try clearing the sign bit. */
2671 if (SCALAR_FLOAT_MODE_P (mode))
2673 temp = expand_absneg_bit (ABS, mode, op0, target);
2674 if (temp)
2675 return temp;
2678 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2679 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2680 && !HONOR_SIGNED_ZEROS (mode))
2682 rtx last = get_last_insn ();
2684 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2685 if (temp != 0)
2686 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2687 OPTAB_WIDEN);
2689 if (temp != 0)
2690 return temp;
2692 delete_insns_since (last);
2695 /* If this machine has expensive jumps, we can do integer absolute
2696 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2697 where W is the width of MODE. */
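/* Editorial worked example in 8 bits: for x = -5 (0xFB) the arithmetic
   shift by 7 yields -1, (-5 ^ -1) is 4, and 4 - (-1) is 5; for
   non-negative x the shift yields 0, so both the xor and the subtraction
   leave x unchanged.  */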
2699 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2701 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2702 size_int (GET_MODE_BITSIZE (mode) - 1),
2703 NULL_RTX, 0);
2705 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2706 OPTAB_LIB_WIDEN);
2707 if (temp != 0)
2708 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2709 temp, extended, target, 0, OPTAB_LIB_WIDEN);
2711 if (temp != 0)
2712 return temp;
2715 return NULL_RTX;
2719 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2720 int result_unsignedp, int safe)
2722 rtx temp, op1;
2724 if (! flag_trapv)
2725 result_unsignedp = 1;
2727 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2728 if (temp != 0)
2729 return temp;
2731 /* If that does not win, use conditional jump and negate. */
2733 /* It is safe to use the target if it is the same
2734 as the source if this is also a pseudo register */
2735 if (op0 == target && REG_P (op0)
2736 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
2737 safe = 1;
2739 op1 = gen_label_rtx ();
2740 if (target == 0 || ! safe
2741 || GET_MODE (target) != mode
2742 || (MEM_P (target) && MEM_VOLATILE_P (target))
2743 || (REG_P (target)
2744 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2745 target = gen_reg_rtx (mode);
2747 emit_move_insn (target, op0);
2748 NO_DEFER_POP;
2750 /* If this mode is an integer too wide to compare properly,
2751 compare word by word. Rely on CSE to optimize constant cases. */
2752 if (GET_MODE_CLASS (mode) == MODE_INT
2753 && ! can_compare_p (GE, mode, ccp_jump))
2754 do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
2755 NULL_RTX, op1);
2756 else
2757 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2758 NULL_RTX, NULL_RTX, op1);
2760 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2761 target, target, 0);
2762 if (op0 != target)
2763 emit_move_insn (target, op0);
2764 emit_label (op1);
2765 OK_DEFER_POP;
2766 return target;
2769 /* A subroutine of expand_copysign, perform the copysign operation using the
2770 abs and neg primitives advertised to exist on the target. The assumption
2771 is that we have a split register file, and leaving op0 in fp registers,
2772 and not playing with subregs so much, will help the register allocator. */
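/* Editorial sketch of the strategy: take |op0| with the abs pattern, test
   the sign bit of op1 with an integer AND and a conditional jump, and
   negate the result only on the path where op1 is negative, so op0 never
   needs to be moved into integer registers.  */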
2774 static rtx
2775 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2776 int bitpos, bool op0_is_abs)
2778 enum machine_mode imode;
2779 HOST_WIDE_INT hi, lo;
2780 int word;
2781 rtx label;
2783 if (target == op1)
2784 target = NULL_RTX;
2786 if (!op0_is_abs)
2788 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2789 if (op0 == NULL)
2790 return NULL_RTX;
2791 target = op0;
2793 else
2795 if (target == NULL_RTX)
2796 target = copy_to_reg (op0);
2797 else
2798 emit_move_insn (target, op0);
2801 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2803 imode = int_mode_for_mode (mode);
2804 if (imode == BLKmode)
2805 return NULL_RTX;
2806 op1 = gen_lowpart (imode, op1);
2808 else
2810 imode = word_mode;
2811 if (FLOAT_WORDS_BIG_ENDIAN)
2812 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2813 else
2814 word = bitpos / BITS_PER_WORD;
2815 bitpos = bitpos % BITS_PER_WORD;
2816 op1 = operand_subword_force (op1, word, mode);
2819 if (bitpos < HOST_BITS_PER_WIDE_INT)
2821 hi = 0;
2822 lo = (HOST_WIDE_INT) 1 << bitpos;
2824 else
2826 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2827 lo = 0;
2830 op1 = expand_binop (imode, and_optab, op1,
2831 immed_double_const (lo, hi, imode),
2832 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2834 label = gen_label_rtx ();
2835 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
2837 if (GET_CODE (op0) == CONST_DOUBLE)
2838 op0 = simplify_unary_operation (NEG, mode, op0, mode);
2839 else
2840 op0 = expand_unop (mode, neg_optab, op0, target, 0);
2841 if (op0 != target)
2842 emit_move_insn (target, op0);
2844 emit_label (label);
2846 return target;
2850 /* A subroutine of expand_copysign, perform the entire copysign operation
2851 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2852 is true if op0 is known to have its sign bit clear. */
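/* Editorial sketch of the bitmask form: with MASK covering only the sign
   bit, the result is (op0 & ~MASK) | (op1 & MASK), i.e. magnitude from
   op0 and sign from op1; when OP0_IS_ABS the masking of op0 can be
   skipped since its sign bit is already known to be clear.  */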
2854 static rtx
2855 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2856 int bitpos, bool op0_is_abs)
2858 enum machine_mode imode;
2859 HOST_WIDE_INT hi, lo;
2860 int word, nwords, i;
2861 rtx temp, insns;
2863 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2865 imode = int_mode_for_mode (mode);
2866 if (imode == BLKmode)
2867 return NULL_RTX;
2868 word = 0;
2869 nwords = 1;
2871 else
2873 imode = word_mode;
2875 if (FLOAT_WORDS_BIG_ENDIAN)
2876 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2877 else
2878 word = bitpos / BITS_PER_WORD;
2879 bitpos = bitpos % BITS_PER_WORD;
2880 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2883 if (bitpos < HOST_BITS_PER_WIDE_INT)
2885 hi = 0;
2886 lo = (HOST_WIDE_INT) 1 << bitpos;
2888 else
2890 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2891 lo = 0;
2894 if (target == 0 || target == op0 || target == op1)
2895 target = gen_reg_rtx (mode);
2897 if (nwords > 1)
2899 start_sequence ();
2901 for (i = 0; i < nwords; ++i)
2903 rtx targ_piece = operand_subword (target, i, 1, mode);
2904 rtx op0_piece = operand_subword_force (op0, i, mode);
2906 if (i == word)
2908 if (!op0_is_abs)
2909 op0_piece = expand_binop (imode, and_optab, op0_piece,
2910 immed_double_const (~lo, ~hi, imode),
2911 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2913 op1 = expand_binop (imode, and_optab,
2914 operand_subword_force (op1, i, mode),
2915 immed_double_const (lo, hi, imode),
2916 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2918 temp = expand_binop (imode, ior_optab, op0_piece, op1,
2919 targ_piece, 1, OPTAB_LIB_WIDEN);
2920 if (temp != targ_piece)
2921 emit_move_insn (targ_piece, temp);
2923 else
2924 emit_move_insn (targ_piece, op0_piece);
2927 insns = get_insns ();
2928 end_sequence ();
2930 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
2932 else
2934 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
2935 immed_double_const (lo, hi, imode),
2936 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2938 op0 = gen_lowpart (imode, op0);
2939 if (!op0_is_abs)
2940 op0 = expand_binop (imode, and_optab, op0,
2941 immed_double_const (~lo, ~hi, imode),
2942 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2944 temp = expand_binop (imode, ior_optab, op0, op1,
2945 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2946 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2949 return target;
2952 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2953 scalar floating point mode. Return NULL if we do not know how to
2954 expand the operation inline. */
2957 expand_copysign (rtx op0, rtx op1, rtx target)
2959 enum machine_mode mode = GET_MODE (op0);
2960 const struct real_format *fmt;
2961 bool op0_is_abs;
2962 rtx temp;
2964 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
2965 gcc_assert (GET_MODE (op1) == mode);
2967 /* First try to do it with a special instruction. */
2968 temp = expand_binop (mode, copysign_optab, op0, op1,
2969 target, 0, OPTAB_DIRECT);
2970 if (temp)
2971 return temp;
2973 fmt = REAL_MODE_FORMAT (mode);
2974 if (fmt == NULL || !fmt->has_signed_zero)
2975 return NULL_RTX;
2977 op0_is_abs = false;
2978 if (GET_CODE (op0) == CONST_DOUBLE)
2980 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
2981 op0 = simplify_unary_operation (ABS, mode, op0, mode);
2982 op0_is_abs = true;
2985 if (fmt->signbit_ro >= 0
2986 && (GET_CODE (op0) == CONST_DOUBLE
2987 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
2988 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
2990 temp = expand_copysign_absneg (mode, op0, op1, target,
2991 fmt->signbit_ro, op0_is_abs);
2992 if (temp)
2993 return temp;
2996 if (fmt->signbit_rw < 0)
2997 return NULL_RTX;
2998 return expand_copysign_bit (mode, op0, op1, target,
2999 fmt->signbit_rw, op0_is_abs);
3002 /* Generate an instruction whose insn-code is INSN_CODE,
3003 with two operands: an output TARGET and an input OP0.
3004 TARGET *must* be nonzero, and the output is always stored there.
3005 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3006 the value that is stored into TARGET. */
3008 void
3009 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3011 rtx temp;
3012 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3013 rtx pat;
3015 temp = target;
3017 /* Now, if insn does not accept our operands, put them into pseudos. */
3019 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3020 op0 = copy_to_mode_reg (mode0, op0);
3022 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3023 temp = gen_reg_rtx (GET_MODE (temp));
3025 pat = GEN_FCN (icode) (temp, op0);
3027 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3028 add_equal_note (pat, temp, code, op0, NULL_RTX);
3030 emit_insn (pat);
3032 if (temp != target)
3033 emit_move_insn (target, temp);
3036 struct no_conflict_data
3038 rtx target, first, insn;
3039 bool must_stay;
3042 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3043 Set P->must_stay if the currently examined clobber / store has to stay
3044 in the list of insns that constitute the actual no_conflict block /
3045 libcall block. */
3046 static void
3047 no_conflict_move_test (rtx dest, rtx set, void *p0)
3049 struct no_conflict_data *p = p0;
3051 /* If this insn directly contributes to setting the target, it must stay. */
3052 if (reg_overlap_mentioned_p (p->target, dest))
3053 p->must_stay = true;
3054 /* If we haven't committed to keeping any other insns in the list yet,
3055 there is nothing more to check. */
3056 else if (p->insn == p->first)
3057 return;
3058 /* If this insn sets / clobbers a register that feeds one of the insns
3059 already in the list, this insn has to stay too. */
3060 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3061 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3062 || reg_used_between_p (dest, p->first, p->insn)
3063 /* Likewise if this insn depends on a register set by a previous
3064 insn in the list, or if it sets a result (presumably a hard
3065 register) that is set or clobbered by a previous insn.
3066 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3067 SET_DEST perform the former check on the address, and the latter
3068 check on the MEM. */
3069 || (GET_CODE (set) == SET
3070 && (modified_in_p (SET_SRC (set), p->first)
3071 || modified_in_p (SET_DEST (set), p->first)
3072 || modified_between_p (SET_SRC (set), p->first, p->insn)
3073 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3074 p->must_stay = true;
3077 /* Emit code to perform a series of operations on a multi-word quantity, one
3078 word at a time.
3080 Such a block is preceded by a CLOBBER of the output, consists of multiple
3081 insns, each setting one word of the output, and followed by a SET copying
3082 the output to itself.
3084 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3085 note indicating that it doesn't conflict with the (also multi-word)
3086 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3087 notes.
3089 INSNS is a block of code generated to perform the operation, not including
3090 the CLOBBER and final copy. All insns that compute intermediate values
3091 are first emitted, followed by the block as described above.
3093 TARGET, OP0, and OP1 are the output and inputs of the operations,
3094 respectively. OP1 may be zero for a unary operation.
3096 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3097 on the last insn.
3099 If TARGET is not a register, INSNS is simply emitted with no special
3100 processing. Likewise if anything in INSNS is not an INSN or if
3101 there is a libcall block inside INSNS.
3103 The final insn emitted is returned. */
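/* Editorial illustration of the block shape described above, for a
   two-word TARGET:

       (clobber TARGET)
       set of word 0 of TARGET     [REG_NO_CONFLICT notes for the inputs]
       set of word 1 of TARGET     [REG_NO_CONFLICT notes for the inputs]
       (set TARGET TARGET)         [REG_EQUAL EQUIV]

   with a REG_LIBCALL note on the first insn of the group and a matching
   REG_RETVAL note on the last, so later passes can treat the block as a
   unit.  */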
3106 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3108 rtx prev, next, first, last, insn;
3110 if (!REG_P (target) || reload_in_progress)
3111 return emit_insn (insns);
3112 else
3113 for (insn = insns; insn; insn = NEXT_INSN (insn))
3114 if (!NONJUMP_INSN_P (insn)
3115 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3116 return emit_insn (insns);
3118 /* First emit all insns that do not store into words of the output and remove
3119 these from the list. */
3120 for (insn = insns; insn; insn = next)
3122 rtx note;
3123 struct no_conflict_data data;
3125 next = NEXT_INSN (insn);
3127 /* Some ports (cris) create libcall regions of their own. We must
3128 avoid any potential nesting of LIBCALLs. */
3129 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3130 remove_note (insn, note);
3131 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3132 remove_note (insn, note);
3134 data.target = target;
3135 data.first = insns;
3136 data.insn = insn;
3137 data.must_stay = 0;
3138 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3139 if (! data.must_stay)
3141 if (PREV_INSN (insn))
3142 NEXT_INSN (PREV_INSN (insn)) = next;
3143 else
3144 insns = next;
3146 if (next)
3147 PREV_INSN (next) = PREV_INSN (insn);
3149 add_insn (insn);
3153 prev = get_last_insn ();
3155 /* Now write the CLOBBER of the output, followed by the setting of each
3156 of the words, followed by the final copy. */
3157 if (target != op0 && target != op1)
3158 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3160 for (insn = insns; insn; insn = next)
3162 next = NEXT_INSN (insn);
3163 add_insn (insn);
3165 if (op1 && REG_P (op1))
3166 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3167 REG_NOTES (insn));
3169 if (op0 && REG_P (op0))
3170 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3171 REG_NOTES (insn));
3174 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3175 != CODE_FOR_nothing)
3177 last = emit_move_insn (target, target);
3178 if (equiv)
3179 set_unique_reg_note (last, REG_EQUAL, equiv);
3181 else
3183 last = get_last_insn ();
3185 /* Remove any existing REG_EQUAL note from "last", or else it will
3186 be mistaken for a note referring to the full contents of the
3187 alleged libcall value when found together with the REG_RETVAL
3188 note added below. An existing note can come from an insn
3189 expansion at "last". */
3190 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3193 if (prev == 0)
3194 first = get_insns ();
3195 else
3196 first = NEXT_INSN (prev);
3198 /* Encapsulate the block so it gets manipulated as a unit. */
3199 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3200 REG_NOTES (first));
3201 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
3203 return last;
3206 /* Emit code to make a call to a constant function or a library call.
3208 INSNS is a list containing all insns emitted in the call.
3209 These insns leave the result in RESULT. Our job is to copy RESULT
3210 to TARGET, which is logically equivalent to EQUIV.
3212 We first emit any insns that set a pseudo on the assumption that these are
3213 loading constants into registers; doing so allows them to be safely cse'ed
3214 between blocks. Then we emit all the other insns in the block, followed by
3215 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3216 note with an operand of EQUIV.
3218 Moving assignments to pseudos outside of the block is done to improve
3219 the generated code, but is not required to generate correct code,
3220 hence being unable to move an assignment is not grounds for not making
3221 a libcall block. There are two reasons why it is safe to leave these
3222 insns inside the block: First, we know that these pseudos cannot be
3223 used in generated RTL outside the block since they are created for
3224 temporary purposes within the block. Second, CSE will not record the
3225 values of anything set inside a libcall block, so we know they must
3226 be dead at the end of the block.
3228 Except for the first group of insns (the ones setting pseudos), the
3229 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3231 void
3232 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3234 rtx final_dest = target;
3235 rtx prev, next, first, last, insn;
3237 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3238 into a MEM later. Protect the libcall block from this change. */
3239 if (! REG_P (target) || REG_USERVAR_P (target))
3240 target = gen_reg_rtx (GET_MODE (target));
3242 /* If we're using non-call exceptions, a libcall corresponding to an
3243 operation that may trap may also trap. */
3244 if (flag_non_call_exceptions && may_trap_p (equiv))
3246 for (insn = insns; insn; insn = NEXT_INSN (insn))
3247 if (CALL_P (insn))
3249 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3251 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3252 remove_note (insn, note);
3255 else
3256 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3257 reg note to indicate that this call cannot throw or execute a nonlocal
3258 goto (unless there is already a REG_EH_REGION note, in which case
3259 we update it). */
3260 for (insn = insns; insn; insn = NEXT_INSN (insn))
3261 if (CALL_P (insn))
3263 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3265 if (note != 0)
3266 XEXP (note, 0) = constm1_rtx;
3267 else
3268 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3269 REG_NOTES (insn));
3272 /* First emit all insns that set pseudos. Remove them from the list as
3273 we go. Avoid insns that set pseudos which were referenced in previous
3274 insns. These can be generated by move_by_pieces, for example,
3275 to update an address. Similarly, avoid insns that reference things
3276 set in previous insns. */
3278 for (insn = insns; insn; insn = next)
3280 rtx set = single_set (insn);
3281 rtx note;
3283 /* Some ports (cris) create libcall regions of their own. We must
3284 avoid any potential nesting of LIBCALLs. */
3285 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3286 remove_note (insn, note);
3287 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3288 remove_note (insn, note);
3290 next = NEXT_INSN (insn);
3292 if (set != 0 && REG_P (SET_DEST (set))
3293 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3295 struct no_conflict_data data;
3297 data.target = const0_rtx;
3298 data.first = insns;
3299 data.insn = insn;
3300 data.must_stay = 0;
3301 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3302 if (! data.must_stay)
3304 if (PREV_INSN (insn))
3305 NEXT_INSN (PREV_INSN (insn)) = next;
3306 else
3307 insns = next;
3309 if (next)
3310 PREV_INSN (next) = PREV_INSN (insn);
3312 add_insn (insn);
3316 /* Some ports use a loop to copy large arguments onto the stack.
3317 Don't move anything outside such a loop. */
3318 if (LABEL_P (insn))
3319 break;
3322 prev = get_last_insn ();
3324 /* Write the remaining insns followed by the final copy. */
3326 for (insn = insns; insn; insn = next)
3328 next = NEXT_INSN (insn);
3330 add_insn (insn);
3333 last = emit_move_insn (target, result);
3334 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3335 != CODE_FOR_nothing)
3336 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3337 else
3339 /* Remove any existing REG_EQUAL note from "last", or else it will
3340 be mistaken for a note referring to the full contents of the
3341 libcall value when found together with the REG_RETVAL note added
3342 below. An existing note can come from an insn expansion at
3343 "last". */
3344 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3347 if (final_dest != target)
3348 emit_move_insn (final_dest, target);
3350 if (prev == 0)
3351 first = get_insns ();
3352 else
3353 first = NEXT_INSN (prev);
3355 /* Encapsulate the block so it gets manipulated as a unit. */
3356 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3358 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3359 when the encapsulated region would not be in one basic block,
3360 i.e. when there is a control_flow_insn_p insn between FIRST and LAST. */
3362 bool attach_libcall_retval_notes = true;
3363 next = NEXT_INSN (last);
3364 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3365 if (control_flow_insn_p (insn))
3367 attach_libcall_retval_notes = false;
3368 break;
3371 if (attach_libcall_retval_notes)
3373 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3374 REG_NOTES (first));
3375 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3376 REG_NOTES (last));
3381 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3382 PURPOSE describes how this comparison will be used. CODE is the rtx
3383 comparison code we will be using.
3385 ??? Actually, CODE is slightly weaker than that. A target is still
3386 required to implement all of the normal bcc operations, but not
3387 required to implement all (or any) of the unordered bcc operations. */
3390 can_compare_p (enum rtx_code code, enum machine_mode mode,
3391 enum can_compare_purpose purpose)
3395 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3397 if (purpose == ccp_jump)
3398 return bcc_gen_fctn[(int) code] != NULL;
3399 else if (purpose == ccp_store_flag)
3400 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3401 else
3402 /* There's only one cmov entry point, and it's allowed to fail. */
3403 return 1;
3405 if (purpose == ccp_jump
3406 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3407 return 1;
3408 if (purpose == ccp_cmov
3409 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3410 return 1;
3411 if (purpose == ccp_store_flag
3412 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3413 return 1;
3414 mode = GET_MODE_WIDER_MODE (mode);
3416 while (mode != VOIDmode);
3418 return 0;
3421 /* This function is called when we are going to emit a compare instruction that
3422 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3424 *PMODE is the mode of the inputs (in case they are const_int).
3425 *PUNSIGNEDP nonzero says that the operands are unsigned;
3426 this matters if they need to be widened.
3428 If they have mode BLKmode, then SIZE specifies the size of both operands.
3430 This function performs all the setup necessary so that the caller only has
3431 to emit a single comparison insn. This setup can involve doing a BLKmode
3432 comparison or emitting a library call to perform the comparison if no insn
3433 is available to handle it.
3434 The values which are passed in through pointers can be modified; the caller
3435 should perform the comparison on the modified values. Constant
3436 comparisons must have already been folded. */
3438 static void
3439 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3440 enum machine_mode *pmode, int *punsignedp,
3441 enum can_compare_purpose purpose)
3443 enum machine_mode mode = *pmode;
3444 rtx x = *px, y = *py;
3445 int unsignedp = *punsignedp;
3447 /* If we are inside an appropriately-short loop and we are optimizing,
3448 force expensive constants into a register. */
3449 if (CONSTANT_P (x) && optimize
3450 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3451 x = force_reg (mode, x);
3453 if (CONSTANT_P (y) && optimize
3454 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3455 y = force_reg (mode, y);
3457 #ifdef HAVE_cc0
3458 /* Make sure we have a canonical comparison. The RTL
3459 documentation states that canonical comparisons are required only
3460 for targets which have cc0. */
3461 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3462 #endif
3464 /* Don't let both operands fail to indicate the mode. */
3465 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3466 x = force_reg (mode, x);
3468 /* Handle all BLKmode compares. */
3470 if (mode == BLKmode)
3472 enum machine_mode cmp_mode, result_mode;
3473 enum insn_code cmp_code;
3474 tree length_type;
3475 rtx libfunc;
3476 rtx result;
3477 rtx opalign
3478 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3480 gcc_assert (size);
3482 /* Try to use a memory block compare insn - either cmpstr
3483 or cmpmem will do. */
3484 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3485 cmp_mode != VOIDmode;
3486 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3488 cmp_code = cmpmem_optab[cmp_mode];
3489 if (cmp_code == CODE_FOR_nothing)
3490 cmp_code = cmpstr_optab[cmp_mode];
3491 if (cmp_code == CODE_FOR_nothing)
3492 cmp_code = cmpstrn_optab[cmp_mode];
3493 if (cmp_code == CODE_FOR_nothing)
3494 continue;
3496 /* Must make sure the size fits the insn's mode. */
3497 if ((GET_CODE (size) == CONST_INT
3498 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3499 || (GET_MODE_BITSIZE (GET_MODE (size))
3500 > GET_MODE_BITSIZE (cmp_mode)))
3501 continue;
3503 result_mode = insn_data[cmp_code].operand[0].mode;
3504 result = gen_reg_rtx (result_mode);
3505 size = convert_to_mode (cmp_mode, size, 1);
3506 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3508 *px = result;
3509 *py = const0_rtx;
3510 *pmode = result_mode;
3511 return;
3514 /* Otherwise call a library function, memcmp. */
3515 libfunc = memcmp_libfunc;
3516 length_type = sizetype;
3517 result_mode = TYPE_MODE (integer_type_node);
3518 cmp_mode = TYPE_MODE (length_type);
3519 size = convert_to_mode (TYPE_MODE (length_type), size,
3520 TYPE_UNSIGNED (length_type));
3522 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3523 result_mode, 3,
3524 XEXP (x, 0), Pmode,
3525 XEXP (y, 0), Pmode,
3526 size, cmp_mode);
3527 *px = result;
3528 *py = const0_rtx;
3529 *pmode = result_mode;
3530 return;
3533 /* Don't allow operands to the compare to trap, as that can put the
3534 compare and branch in different basic blocks. */
3535 if (flag_non_call_exceptions)
3537 if (may_trap_p (x))
3538 x = force_reg (mode, x);
3539 if (may_trap_p (y))
3540 y = force_reg (mode, y);
3543 *px = x;
3544 *py = y;
3545 if (can_compare_p (*pcomparison, mode, purpose))
3546 return;
3548 /* Handle a lib call just for the mode we are using. */
3550 if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
3552 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3553 rtx result;
3555 /* If we want unsigned, and this mode has a distinct unsigned
3556 comparison routine, use that. */
3557 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3558 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3560 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3561 word_mode, 2, x, mode, y, mode);
3563 *px = result;
3564 *pmode = word_mode;
3565 if (TARGET_LIB_INT_CMP_BIASED)
3566 /* Integer comparison returns a result that must be compared
3567 against 1, so that even if we do an unsigned compare
3568 afterward, there is still a value that can represent the
3569 result "less than". */
3570 *py = const1_rtx;
3571 else
3573 *py = const0_rtx;
3574 *punsignedp = 1;
3576 return;
3579 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3580 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3583 /* Before emitting an insn with code ICODE, make sure that X, which is going
3584 to be used for operand OPNUM of the insn, is converted from mode MODE to
3585 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3586 that it is accepted by the operand predicate. Return the new value. */
3588 static rtx
3589 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3590 enum machine_mode wider_mode, int unsignedp)
3592 if (mode != wider_mode)
3593 x = convert_modes (wider_mode, mode, x, unsignedp);
3595 if (!insn_data[icode].operand[opnum].predicate
3596 (x, insn_data[icode].operand[opnum].mode))
3598 if (no_new_pseudos)
3599 return NULL_RTX;
3600 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3603 return x;
3606 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3607 we can do the comparison.
3608 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3609 be NULL_RTX which indicates that only a comparison is to be generated. */
3611 static void
3612 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3613 enum rtx_code comparison, int unsignedp, rtx label)
3615 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3616 enum mode_class class = GET_MODE_CLASS (mode);
3617 enum machine_mode wider_mode = mode;
3619 /* Try combined insns first. */
3622 enum insn_code icode;
3623 PUT_MODE (test, wider_mode);
3625 if (label)
3627 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3629 if (icode != CODE_FOR_nothing
3630 && insn_data[icode].operand[0].predicate (test, wider_mode))
3632 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3633 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3634 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3635 return;
3639 /* Handle some compares against zero. */
3640 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3641 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3643 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3644 emit_insn (GEN_FCN (icode) (x));
3645 if (label)
3646 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3647 return;
3650 /* Handle compares for which there is a directly suitable insn. */
3652 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3653 if (icode != CODE_FOR_nothing)
3655 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3656 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3657 emit_insn (GEN_FCN (icode) (x, y));
3658 if (label)
3659 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3660 return;
3663 if (!CLASS_HAS_WIDER_MODES_P (class))
3664 break;
3666 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3668 while (wider_mode != VOIDmode);
3670 gcc_unreachable ();
3673 /* Generate code to compare X with Y so that the condition codes are
3674 set and to jump to LABEL if the condition is true. If X is a
3675 constant and Y is not a constant, then the comparison is swapped to
3676 ensure that the comparison RTL has the canonical form.
3678 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3679 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3680 the proper branch condition code.
3682 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3684 MODE is the mode of the inputs (in case they are const_int).
3686 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3687 be passed unchanged to emit_cmp_insn, then potentially converted into an
3688 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3690 void
3691 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3692 enum machine_mode mode, int unsignedp, rtx label)
3694 rtx op0 = x, op1 = y;
3696 /* Swap operands and condition to ensure canonical RTL. */
3697 if (swap_commutative_operands_p (x, y))
3699 /* If we're not emitting a branch, this means some caller
3700 is out of sync. */
3701 gcc_assert (label);
3703 op0 = y, op1 = x;
3704 comparison = swap_condition (comparison);
3707 #ifdef HAVE_cc0
3708 /* If OP0 is still a constant, then both X and Y must be constants.
3709 Force X into a register to create canonical RTL. */
3710 if (CONSTANT_P (op0))
3711 op0 = force_reg (mode, op0);
3712 #endif
3714 if (unsignedp)
3715 comparison = unsigned_condition (comparison);
3717 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3718 ccp_jump);
3719 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
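/* Illustrative usage sketch (assumptions: OP is an existing SImode pseudo
   and LAB is later placed with emit_label): a caller wanting
   "if (op == 0) goto lab" for signed operands would write

     rtx lab = gen_label_rtx ();
     emit_cmp_and_jump_insns (op, const0_rtx, EQ, NULL_RTX, SImode, 0, lab);

   and leave the widening, canonicalization and insn selection above to do
   the rest.  */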
3722 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3724 void
3725 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3726 enum machine_mode mode, int unsignedp)
3728 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3731 /* Emit a library call comparison between floating point X and Y.
3732 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3734 static void
3735 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3736 enum machine_mode *pmode, int *punsignedp)
3738 enum rtx_code comparison = *pcomparison;
3739 enum rtx_code swapped = swap_condition (comparison);
3740 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3741 rtx x = *px;
3742 rtx y = *py;
3743 enum machine_mode orig_mode = GET_MODE (x);
3744 enum machine_mode mode;
3745 rtx value, target, insns, equiv;
3746 rtx libfunc = 0;
3747 bool reversed_p = false;
3749 for (mode = orig_mode;
3750 mode != VOIDmode;
3751 mode = GET_MODE_WIDER_MODE (mode))
3753 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3754 break;
3756 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3758 rtx tmp;
3759 tmp = x; x = y; y = tmp;
3760 comparison = swapped;
3761 break;
3764 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3765 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3767 comparison = reversed;
3768 reversed_p = true;
3769 break;
3773 gcc_assert (mode != VOIDmode);
3775 if (mode != orig_mode)
3777 x = convert_to_mode (mode, x, 0);
3778 y = convert_to_mode (mode, y, 0);
3781 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3782 the RTL. This allows the RTL optimizers to delete the libcall if the
3783 condition can be determined at compile-time. */
3784 if (comparison == UNORDERED)
3786 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3787 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3788 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3789 temp, const_true_rtx, equiv);
3791 else
3793 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
3794 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3796 rtx true_rtx, false_rtx;
3798 switch (comparison)
3800 case EQ:
3801 true_rtx = const0_rtx;
3802 false_rtx = const_true_rtx;
3803 break;
3805 case NE:
3806 true_rtx = const_true_rtx;
3807 false_rtx = const0_rtx;
3808 break;
3810 case GT:
3811 true_rtx = const1_rtx;
3812 false_rtx = const0_rtx;
3813 break;
3815 case GE:
3816 true_rtx = const0_rtx;
3817 false_rtx = constm1_rtx;
3818 break;
3820 case LT:
3821 true_rtx = constm1_rtx;
3822 false_rtx = const0_rtx;
3823 break;
3825 case LE:
3826 true_rtx = const0_rtx;
3827 false_rtx = const1_rtx;
3828 break;
3830 default:
3831 gcc_unreachable ();
3833 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3834 equiv, true_rtx, false_rtx);
3838 start_sequence ();
3839 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3840 word_mode, 2, x, mode, y, mode);
3841 insns = get_insns ();
3842 end_sequence ();
3844 target = gen_reg_rtx (word_mode);
3845 emit_libcall_block (insns, target, value, equiv);
3847 if (comparison == UNORDERED
3848 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3849 comparison = reversed_p ? EQ : NE;
3851 *px = target;
3852 *py = const0_rtx;
3853 *pmode = word_mode;
3854 *pcomparison = comparison;
3855 *punsignedp = 0;
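/* For example, with the default libfunc names installed by init_optabs
   below, a GE comparison of two SFmode values A and B becomes a call to
   __gesf2, and the caller is handed back the call's word_mode result to be
   compared GE against const0_rtx.  The representative true/false values
   chosen above only need to give the right answer for that final compare
   against zero; the real libcall may return other values on the same side
   of zero.  (Illustrative note; targets may rename these libcalls.)  */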
3858 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3860 void
3861 emit_indirect_jump (rtx loc)
3863 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
3864 (loc, Pmode))
3865 loc = copy_to_mode_reg (Pmode, loc);
3867 emit_jump_insn (gen_indirect_jump (loc));
3868 emit_barrier ();
3871 #ifdef HAVE_conditional_move
3873 /* Emit a conditional move instruction if the machine supports one for that
3874 condition and machine mode.
3876 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3877 the mode to use should they be constants. If it is VOIDmode, they cannot
3878 both be constants.
3880 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3881 should be stored there. MODE is the mode to use should they be constants.
3882 If it is VOIDmode, they cannot both be constants.
3884 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3885 is not supported. */
3888 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
3889 enum machine_mode cmode, rtx op2, rtx op3,
3890 enum machine_mode mode, int unsignedp)
3892 rtx tem, subtarget, comparison, insn;
3893 enum insn_code icode;
3894 enum rtx_code reversed;
3896 /* If one operand is constant, make it the second one. Only do this
3897 if the other operand is not constant as well. */
3899 if (swap_commutative_operands_p (op0, op1))
3901 tem = op0;
3902 op0 = op1;
3903 op1 = tem;
3904 code = swap_condition (code);
3907 /* get_condition will prefer to generate LT and GT even if the old
3908 comparison was against zero, so undo that canonicalization here since
3909 comparisons against zero are cheaper. */
3910 if (code == LT && op1 == const1_rtx)
3911 code = LE, op1 = const0_rtx;
3912 else if (code == GT && op1 == constm1_rtx)
3913 code = GE, op1 = const0_rtx;
3915 if (cmode == VOIDmode)
3916 cmode = GET_MODE (op0);
3918 if (swap_commutative_operands_p (op2, op3)
3919 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
3920 != UNKNOWN))
3922 tem = op2;
3923 op2 = op3;
3924 op3 = tem;
3925 code = reversed;
3928 if (mode == VOIDmode)
3929 mode = GET_MODE (op2);
3931 icode = movcc_gen_code[mode];
3933 if (icode == CODE_FOR_nothing)
3934 return 0;
3936 if (!target)
3937 target = gen_reg_rtx (mode);
3939 subtarget = target;
3941 /* If the insn doesn't accept these operands, put them in pseudos. */
3943 if (!insn_data[icode].operand[0].predicate
3944 (subtarget, insn_data[icode].operand[0].mode))
3945 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
3947 if (!insn_data[icode].operand[2].predicate
3948 (op2, insn_data[icode].operand[2].mode))
3949 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
3951 if (!insn_data[icode].operand[3].predicate
3952 (op3, insn_data[icode].operand[3].mode))
3953 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
3955 /* Everything should now be in the suitable form, so emit the compare insn
3956 and then the conditional move. */
3958 comparison
3959 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
3961 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3962 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3963 return NULL and let the caller figure out how best to deal with this
3964 situation. */
3965 if (GET_CODE (comparison) != code)
3966 return NULL_RTX;
3968 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
3970 /* If that failed, then give up. */
3971 if (insn == 0)
3972 return 0;
3974 emit_insn (insn);
3976 if (subtarget != target)
3977 convert_move (target, subtarget, 0);
3979 return target;
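/* Illustrative usage sketch (assumptions: A and B are existing SImode
   pseudos): computing max (a, b) with a conditional move, with a fallback
   when the target has no SImode movcc pattern:

     rtx res = emit_conditional_move (NULL_RTX, GT, a, b, SImode,
                                      a, b, SImode, 0);
     if (res == 0)
       ... emit an explicit compare-and-branch sequence instead ...
*/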
3982 /* Return nonzero if a conditional move of mode MODE is supported.
3984 This function is for combine so it can tell whether an insn that looks
3985 like a conditional move is actually supported by the hardware. If we
3986 guess wrong we lose a bit on optimization, but that's it. */
3987 /* ??? sparc64 supports conditionally moving integer values based on fp
3988 comparisons, and vice versa. How do we handle them? */
3991 can_conditionally_move_p (enum machine_mode mode)
3993 if (movcc_gen_code[mode] != CODE_FOR_nothing)
3994 return 1;
3996 return 0;
3999 #endif /* HAVE_conditional_move */
4001 /* Emit a conditional addition instruction if the machine supports one for that
4002 condition and machine mode.
4004 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4005 the mode to use should they be constants. If it is VOIDmode, they cannot
4006 both be constants.
4008 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4009 should be stored there. MODE is the mode to use should they be constants.
4010 If it is VOIDmode, they cannot both be constants.
4012 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4013 is not supported. */
4016 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4017 enum machine_mode cmode, rtx op2, rtx op3,
4018 enum machine_mode mode, int unsignedp)
4020 rtx tem, subtarget, comparison, insn;
4021 enum insn_code icode;
4022 enum rtx_code reversed;
4024 /* If one operand is constant, make it the second one. Only do this
4025 if the other operand is not constant as well. */
4027 if (swap_commutative_operands_p (op0, op1))
4029 tem = op0;
4030 op0 = op1;
4031 op1 = tem;
4032 code = swap_condition (code);
4035 /* get_condition will prefer to generate LT and GT even if the old
4036 comparison was against zero, so undo that canonicalization here since
4037 comparisons against zero are cheaper. */
4038 if (code == LT && op1 == const1_rtx)
4039 code = LE, op1 = const0_rtx;
4040 else if (code == GT && op1 == constm1_rtx)
4041 code = GE, op1 = const0_rtx;
4043 if (cmode == VOIDmode)
4044 cmode = GET_MODE (op0);
4046 if (swap_commutative_operands_p (op2, op3)
4047 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4048 != UNKNOWN))
4050 tem = op2;
4051 op2 = op3;
4052 op3 = tem;
4053 code = reversed;
4056 if (mode == VOIDmode)
4057 mode = GET_MODE (op2);
4059 icode = addcc_optab->handlers[(int) mode].insn_code;
4061 if (icode == CODE_FOR_nothing)
4062 return 0;
4064 if (!target)
4065 target = gen_reg_rtx (mode);
4067 /* If the insn doesn't accept these operands, put them in pseudos. */
4069 if (!insn_data[icode].operand[0].predicate
4070 (target, insn_data[icode].operand[0].mode))
4071 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4072 else
4073 subtarget = target;
4075 if (!insn_data[icode].operand[2].predicate
4076 (op2, insn_data[icode].operand[2].mode))
4077 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4079 if (!insn_data[icode].operand[3].predicate
4080 (op3, insn_data[icode].operand[3].mode))
4081 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4083 /* Everything should now be in the suitable form, so emit the compare insn
4084 and then the conditional move. */
4086 comparison
4087 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4089 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4090 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4091 return NULL and let the caller figure out how best to deal with this
4092 situation. */
4093 if (GET_CODE (comparison) != code)
4094 return NULL_RTX;
4096 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4098 /* If that failed, then give up. */
4099 if (insn == 0)
4100 return 0;
4102 emit_insn (insn);
4104 if (subtarget != target)
4105 convert_move (target, subtarget, 0);
4107 return target;
4110 /* These functions attempt to generate an insn body, rather than
4111 emitting the insn, but if the gen function already emits them, we
4112 make no attempt to turn them back into naked patterns. */
4114 /* Generate and return an insn body to add Y to X. */
4117 gen_add2_insn (rtx x, rtx y)
4119 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4121 gcc_assert (insn_data[icode].operand[0].predicate
4122 (x, insn_data[icode].operand[0].mode));
4123 gcc_assert (insn_data[icode].operand[1].predicate
4124 (x, insn_data[icode].operand[1].mode));
4125 gcc_assert (insn_data[icode].operand[2].predicate
4126 (y, insn_data[icode].operand[2].mode));
4128 return GEN_FCN (icode) (x, x, y);
4131 /* Generate and return an insn body to add r1 and c,
4132 storing the result in r0. */
4134 gen_add3_insn (rtx r0, rtx r1, rtx c)
4136 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4138 if (icode == CODE_FOR_nothing
4139 || !(insn_data[icode].operand[0].predicate
4140 (r0, insn_data[icode].operand[0].mode))
4141 || !(insn_data[icode].operand[1].predicate
4142 (r1, insn_data[icode].operand[1].mode))
4143 || !(insn_data[icode].operand[2].predicate
4144 (c, insn_data[icode].operand[2].mode)))
4145 return NULL_RTX;
4147 return GEN_FCN (icode) (r0, r1, c);
4151 have_add2_insn (rtx x, rtx y)
4153 int icode;
4155 gcc_assert (GET_MODE (x) != VOIDmode);
4157 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4159 if (icode == CODE_FOR_nothing)
4160 return 0;
4162 if (!(insn_data[icode].operand[0].predicate
4163 (x, insn_data[icode].operand[0].mode))
4164 || !(insn_data[icode].operand[1].predicate
4165 (x, insn_data[icode].operand[1].mode))
4166 || !(insn_data[icode].operand[2].predicate
4167 (y, insn_data[icode].operand[2].mode)))
4168 return 0;
4170 return 1;
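/* Illustrative sketch: gen_add2_insn asserts that its operands already
   satisfy the insn predicates, so callers that are not sure typically
   guard it with the predicate above (X and Y assumed to be rtx's of the
   same integer mode):

     if (have_add2_insn (x, y))
       emit_insn (gen_add2_insn (x, y));

   which emits x = x + y, and fall back to expand_binop otherwise.  */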
4173 /* Generate and return an insn body to subtract Y from X. */
4176 gen_sub2_insn (rtx x, rtx y)
4178 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4180 gcc_assert (insn_data[icode].operand[0].predicate
4181 (x, insn_data[icode].operand[0].mode));
4182 gcc_assert (insn_data[icode].operand[1].predicate
4183 (x, insn_data[icode].operand[1].mode));
4184 gcc_assert (insn_data[icode].operand[2].predicate
4185 (y, insn_data[icode].operand[2].mode));
4187 return GEN_FCN (icode) (x, x, y);
4190 /* Generate and return an insn body to subtract c from r1,
4191 storing the result in r0. */
4193 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4195 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4197 if (icode == CODE_FOR_nothing
4198 || !(insn_data[icode].operand[0].predicate
4199 (r0, insn_data[icode].operand[0].mode))
4200 || !(insn_data[icode].operand[1].predicate
4201 (r1, insn_data[icode].operand[1].mode))
4202 || !(insn_data[icode].operand[2].predicate
4203 (c, insn_data[icode].operand[2].mode)))
4204 return NULL_RTX;
4206 return GEN_FCN (icode) (r0, r1, c);
4210 have_sub2_insn (rtx x, rtx y)
4212 int icode;
4214 gcc_assert (GET_MODE (x) != VOIDmode);
4216 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4218 if (icode == CODE_FOR_nothing)
4219 return 0;
4221 if (!(insn_data[icode].operand[0].predicate
4222 (x, insn_data[icode].operand[0].mode))
4223 || !(insn_data[icode].operand[1].predicate
4224 (x, insn_data[icode].operand[1].mode))
4225 || !(insn_data[icode].operand[2].predicate
4226 (y, insn_data[icode].operand[2].mode)))
4227 return 0;
4229 return 1;
4232 /* Generate the body of an instruction to copy Y into X.
4233 It may be a list of insns, if one insn isn't enough. */
4236 gen_move_insn (rtx x, rtx y)
4238 rtx seq;
4240 start_sequence ();
4241 emit_move_insn_1 (x, y);
4242 seq = get_insns ();
4243 end_sequence ();
4244 return seq;
4247 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4248 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4249 no such operation exists, CODE_FOR_nothing will be returned. */
4251 enum insn_code
4252 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4253 int unsignedp)
4255 convert_optab tab;
4256 #ifdef HAVE_ptr_extend
4257 if (unsignedp < 0)
4258 return CODE_FOR_ptr_extend;
4259 #endif
4261 tab = unsignedp ? zext_optab : sext_optab;
4262 return tab->handlers[to_mode][from_mode].insn_code;
4265 /* Generate the body of an insn to extend Y (with mode MFROM)
4266 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4269 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4270 enum machine_mode mfrom, int unsignedp)
4272 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4273 return GEN_FCN (icode) (x, y);
4276 /* can_fix_p and can_float_p say whether the target machine
4277 can directly convert a given fixed point type to
4278 a given floating point type, or vice versa.
4279 The returned value is the CODE_FOR_... value to use,
4280 or CODE_FOR_nothing if these modes cannot be directly converted.
4282 *TRUNCP_PTR is set to 1 if it is necessary to output
4283 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4285 static enum insn_code
4286 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4287 int unsignedp, int *truncp_ptr)
4289 convert_optab tab;
4290 enum insn_code icode;
4292 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4293 icode = tab->handlers[fixmode][fltmode].insn_code;
4294 if (icode != CODE_FOR_nothing)
4296 *truncp_ptr = 0;
4297 return icode;
4300 /* FIXME: This requires a port to define both FIX and FTRUNC patterns
4301 for this to work. We need to rework the fix* and ftrunc* patterns
4302 and documentation. */
4303 tab = unsignedp ? ufix_optab : sfix_optab;
4304 icode = tab->handlers[fixmode][fltmode].insn_code;
4305 if (icode != CODE_FOR_nothing
4306 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4308 *truncp_ptr = 1;
4309 return icode;
4312 *truncp_ptr = 0;
4313 return CODE_FOR_nothing;
4316 static enum insn_code
4317 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4318 int unsignedp)
4320 convert_optab tab;
4322 tab = unsignedp ? ufloat_optab : sfloat_optab;
4323 return tab->handlers[fltmode][fixmode].insn_code;
4326 /* Generate code to convert FROM to floating point
4327 and store in TO. FROM must be fixed point and not VOIDmode.
4328 UNSIGNEDP nonzero means regard FROM as unsigned.
4329 Normally this is done by correcting the final value
4330 if it is negative. */
4332 void
4333 expand_float (rtx to, rtx from, int unsignedp)
4335 enum insn_code icode;
4336 rtx target = to;
4337 enum machine_mode fmode, imode;
4338 bool can_do_signed = false;
4340 /* Crash now, because we won't be able to decide which mode to use. */
4341 gcc_assert (GET_MODE (from) != VOIDmode);
4343 /* Look for an insn to do the conversion. Do it in the specified
4344 modes if possible; otherwise convert either input, output or both to
4345 wider mode. If the integer mode is wider than the mode of FROM,
4346 we can do the conversion signed even if the input is unsigned. */
4348 for (fmode = GET_MODE (to); fmode != VOIDmode;
4349 fmode = GET_MODE_WIDER_MODE (fmode))
4350 for (imode = GET_MODE (from); imode != VOIDmode;
4351 imode = GET_MODE_WIDER_MODE (imode))
4353 int doing_unsigned = unsignedp;
4355 if (fmode != GET_MODE (to)
4356 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4357 continue;
4359 icode = can_float_p (fmode, imode, unsignedp);
4360 if (icode == CODE_FOR_nothing && unsignedp)
4362 enum insn_code scode = can_float_p (fmode, imode, 0);
4363 if (scode != CODE_FOR_nothing)
4364 can_do_signed = true;
4365 if (imode != GET_MODE (from))
4366 icode = scode, doing_unsigned = 0;
4369 if (icode != CODE_FOR_nothing)
4371 if (imode != GET_MODE (from))
4372 from = convert_to_mode (imode, from, unsignedp);
4374 if (fmode != GET_MODE (to))
4375 target = gen_reg_rtx (fmode);
4377 emit_unop_insn (icode, target, from,
4378 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4380 if (target != to)
4381 convert_move (to, target, 0);
4382 return;
4386 /* Unsigned integer, and no way to convert directly.
4387 Convert as signed, then conditionally adjust the result. */
4388 if (unsignedp && can_do_signed)
4390 rtx label = gen_label_rtx ();
4391 rtx temp;
4392 REAL_VALUE_TYPE offset;
4394 /* Look for a usable floating mode FMODE wider than the source and at
4395 least as wide as the target. Using FMODE will avoid rounding woes
4396 with unsigned values greater than the signed maximum value. */
4398 for (fmode = GET_MODE (to); fmode != VOIDmode;
4399 fmode = GET_MODE_WIDER_MODE (fmode))
4400 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4401 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4402 break;
4404 if (fmode == VOIDmode)
4406 /* There is no such mode. Pretend the target is wide enough. */
4407 fmode = GET_MODE (to);
4409 /* Avoid double-rounding when TO is narrower than FROM. */
4410 if ((significand_size (fmode) + 1)
4411 < GET_MODE_BITSIZE (GET_MODE (from)))
4413 rtx temp1;
4414 rtx neglabel = gen_label_rtx ();
4416 /* Don't use TARGET if it isn't a register, is a hard register,
4417 or is the wrong mode. */
4418 if (!REG_P (target)
4419 || REGNO (target) < FIRST_PSEUDO_REGISTER
4420 || GET_MODE (target) != fmode)
4421 target = gen_reg_rtx (fmode);
4423 imode = GET_MODE (from);
4424 do_pending_stack_adjust ();
4426 /* Test whether the sign bit is set. */
4427 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4428 0, neglabel);
4430 /* The sign bit is not set. Convert as signed. */
4431 expand_float (target, from, 0);
4432 emit_jump_insn (gen_jump (label));
4433 emit_barrier ();
4435 /* The sign bit is set.
4436 Convert to a usable (positive signed) value by shifting right
4437 one bit, while remembering if a nonzero bit was shifted
4438 out; i.e., compute (from & 1) | (from >> 1). */
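/* The OR-ed-in low bit acts, in effect, as a sticky bit: on this path
   FROM >> 1 is already wider than FMODE's significand, so that bit can
   only influence the rounding decision, and it influences it exactly as
   converting FROM directly would; the final doubling is then exact.  */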
4440 emit_label (neglabel);
4441 temp = expand_binop (imode, and_optab, from, const1_rtx,
4442 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4443 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4444 NULL_RTX, 1);
4445 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4446 OPTAB_LIB_WIDEN);
4447 expand_float (target, temp, 0);
4449 /* Multiply by 2 to undo the shift above. */
4450 temp = expand_binop (fmode, add_optab, target, target,
4451 target, 0, OPTAB_LIB_WIDEN);
4452 if (temp != target)
4453 emit_move_insn (target, temp);
4455 do_pending_stack_adjust ();
4456 emit_label (label);
4457 goto done;
4461 /* If we are about to do some arithmetic to correct for an
4462 unsigned operand, do it in a pseudo-register. */
4464 if (GET_MODE (to) != fmode
4465 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4466 target = gen_reg_rtx (fmode);
4468 /* Convert as signed integer to floating. */
4469 expand_float (target, from, 0);
4471 /* If FROM is negative (and therefore TO is negative),
4472 correct its value by 2**bitwidth. */
4474 do_pending_stack_adjust ();
4475 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4476 0, label);
4479 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4480 temp = expand_binop (fmode, add_optab, target,
4481 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4482 target, 0, OPTAB_LIB_WIDEN);
4483 if (temp != target)
4484 emit_move_insn (target, temp);
4486 do_pending_stack_adjust ();
4487 emit_label (label);
4488 goto done;
4491 /* No hardware instruction available; call a library routine. */
4493 rtx libfunc;
4494 rtx insns;
4495 rtx value;
4496 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4498 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4499 from = convert_to_mode (SImode, from, unsignedp);
4501 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4502 gcc_assert (libfunc);
4504 start_sequence ();
4506 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4507 GET_MODE (to), 1, from,
4508 GET_MODE (from));
4509 insns = get_insns ();
4510 end_sequence ();
4512 emit_libcall_block (insns, target, value,
4513 gen_rtx_FLOAT (GET_MODE (to), from));
4516 done:
4518 /* Copy result to requested destination
4519 if we have been computing in a temp location. */
4521 if (target != to)
4523 if (GET_MODE (target) == GET_MODE (to))
4524 emit_move_insn (to, target);
4525 else
4526 convert_move (to, target, 0);
4530 /* Generate code to convert FROM to fixed point and store in TO. FROM
4531 must be floating point. */
4533 void
4534 expand_fix (rtx to, rtx from, int unsignedp)
4536 enum insn_code icode;
4537 rtx target = to;
4538 enum machine_mode fmode, imode;
4539 int must_trunc = 0;
4541 /* We first try to find a pair of modes, one real and one integer, at
4542 least as wide as FROM and TO, respectively, in which we can open-code
4543 this conversion. If the integer mode is wider than the mode of TO,
4544 we can do the conversion either signed or unsigned. */
4546 for (fmode = GET_MODE (from); fmode != VOIDmode;
4547 fmode = GET_MODE_WIDER_MODE (fmode))
4548 for (imode = GET_MODE (to); imode != VOIDmode;
4549 imode = GET_MODE_WIDER_MODE (imode))
4551 int doing_unsigned = unsignedp;
4553 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4554 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4555 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4557 if (icode != CODE_FOR_nothing)
4559 if (fmode != GET_MODE (from))
4560 from = convert_to_mode (fmode, from, 0);
4562 if (must_trunc)
4564 rtx temp = gen_reg_rtx (GET_MODE (from));
4565 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4566 temp, 0);
4569 if (imode != GET_MODE (to))
4570 target = gen_reg_rtx (imode);
4572 emit_unop_insn (icode, target, from,
4573 doing_unsigned ? UNSIGNED_FIX : FIX);
4574 if (target != to)
4575 convert_move (to, target, unsignedp);
4576 return;
4580 /* For an unsigned conversion, there is one more way to do it.
4581 If a signed conversion is available, we generate code that compares
4582 the real value to the largest representable positive number. If it
4583 is smaller, the conversion is done normally. Otherwise, subtract
4584 one plus the highest signed number, convert, and add it back.
4586 We only need to check all real modes, since we know we didn't find
4587 anything with a wider integer mode.
4589 This code used to extend the FP value into a mode wider than the
4590 destination. This is not needed. Consider, for instance, conversion
4591 from SFmode into DImode.
4593 The hot path through the code deals with inputs smaller than 2^63
4594 and does just the conversion, so there are no bits to lose.
4596 On the other path we know the value is positive and in the range
4597 2^63..2^64-1 inclusive (for any other input, overflow happens and the
4598 result is undefined), so the most significant bit set in the mantissa
4599 corresponds to 2^63. Subtracting 2^63 does not cause any rounding, as
4600 it simply clears that bit. The rest is trivial. */
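/* Worked example (64-bit TO, DFmode FROM): to fix x = 2^63 + 2^20, the
   comparison against 2^63 sends us to the fixup path, where we form
   x - 2^63 = 2^20 exactly, convert that with a signed fix to get 2^20,
   and XOR in the top bit to obtain 2^63 + 2^20.  Inputs below 2^63 take
   the plain signed fix and never reach the fixup code.  */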
4602 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4603 for (fmode = GET_MODE (from); fmode != VOIDmode;
4604 fmode = GET_MODE_WIDER_MODE (fmode))
4605 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4606 &must_trunc))
4608 int bitsize;
4609 REAL_VALUE_TYPE offset;
4610 rtx limit, lab1, lab2, insn;
4612 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4613 real_2expN (&offset, bitsize - 1);
4614 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4615 lab1 = gen_label_rtx ();
4616 lab2 = gen_label_rtx ();
4618 if (fmode != GET_MODE (from))
4619 from = convert_to_mode (fmode, from, 0);
4621 /* See if we need to do the subtraction. */
4622 do_pending_stack_adjust ();
4623 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4624 0, lab1);
4626 /* If not, do the signed "fix" and branch around fixup code. */
4627 expand_fix (to, from, 0);
4628 emit_jump_insn (gen_jump (lab2));
4629 emit_barrier ();
4631 /* Otherwise, subtract 2**(N-1), convert to signed number,
4632 then add 2**(N-1). Do the addition using XOR since this
4633 will often generate better code. */
4634 emit_label (lab1);
4635 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4636 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4637 expand_fix (to, target, 0);
4638 target = expand_binop (GET_MODE (to), xor_optab, to,
4639 gen_int_mode
4640 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4641 GET_MODE (to)),
4642 to, 1, OPTAB_LIB_WIDEN);
4644 if (target != to)
4645 emit_move_insn (to, target);
4647 emit_label (lab2);
4649 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4650 != CODE_FOR_nothing)
4652 /* Make a place for a REG_NOTE and add it. */
4653 insn = emit_move_insn (to, to);
4654 set_unique_reg_note (insn,
4655 REG_EQUAL,
4656 gen_rtx_fmt_e (UNSIGNED_FIX,
4657 GET_MODE (to),
4658 copy_rtx (from)));
4661 return;
4664 /* We can't do it with an insn, so use a library call. But first ensure
4665 that the mode of TO is at least as wide as SImode, since those are the
4666 only library calls we know about. */
4668 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4670 target = gen_reg_rtx (SImode);
4672 expand_fix (target, from, unsignedp);
4674 else
4676 rtx insns;
4677 rtx value;
4678 rtx libfunc;
4680 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4681 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4682 gcc_assert (libfunc);
4684 start_sequence ();
4686 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4687 GET_MODE (to), 1, from,
4688 GET_MODE (from));
4689 insns = get_insns ();
4690 end_sequence ();
4692 emit_libcall_block (insns, target, value,
4693 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4694 GET_MODE (to), from));
4697 if (target != to)
4699 if (GET_MODE (to) == GET_MODE (target))
4700 emit_move_insn (to, target);
4701 else
4702 convert_move (to, target, 0);
4706 /* Report whether we have an instruction to perform the operation
4707 specified by CODE on operands of mode MODE. */
4709 have_insn_for (enum rtx_code code, enum machine_mode mode)
4711 return (code_to_optab[(int) code] != 0
4712 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4713 != CODE_FOR_nothing));
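/* Illustrative sketch: a pass that only wants to know whether the target
   has, say, a native SImode negate can simply test

     if (have_insn_for (NEG, SImode))
       ...

   without caring which optab or named pattern (negsi2 here) provides it.  */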
4716 /* Create a blank optab. */
4717 static optab
4718 new_optab (void)
4720 int i;
4721 optab op = ggc_alloc (sizeof (struct optab));
4722 for (i = 0; i < NUM_MACHINE_MODES; i++)
4724 op->handlers[i].insn_code = CODE_FOR_nothing;
4725 op->handlers[i].libfunc = 0;
4728 return op;
4731 static convert_optab
4732 new_convert_optab (void)
4734 int i, j;
4735 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4736 for (i = 0; i < NUM_MACHINE_MODES; i++)
4737 for (j = 0; j < NUM_MACHINE_MODES; j++)
4739 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4740 op->handlers[i][j].libfunc = 0;
4742 return op;
4745 /* Same, but fill in its code as CODE, and write it into the
4746 code_to_optab table. */
4747 static inline optab
4748 init_optab (enum rtx_code code)
4750 optab op = new_optab ();
4751 op->code = code;
4752 code_to_optab[(int) code] = op;
4753 return op;
4756 /* Same, but fill in its code as CODE, and do _not_ write it into
4757 the code_to_optab table. */
4758 static inline optab
4759 init_optabv (enum rtx_code code)
4761 optab op = new_optab ();
4762 op->code = code;
4763 return op;
4766 /* Conversion optabs never go in the code_to_optab table. */
4767 static inline convert_optab
4768 init_convert_optab (enum rtx_code code)
4770 convert_optab op = new_convert_optab ();
4771 op->code = code;
4772 return op;
4775 /* Initialize the libfunc fields of an entire group of entries in some
4776 optab. Each entry is set equal to a string consisting of a leading
4777 pair of underscores followed by a generic operation name followed by
4778 a mode name (downshifted to lowercase) followed by a single character
4779 representing the number of operands for the given operation (which is
4780 usually one of the characters '2', '3', or '4').
4782 OPTABLE is the table in which libfunc fields are to be initialized.
4783 FIRST_MODE is the first machine mode index in the given optab to
4784 initialize.
4785 LAST_MODE is the last machine mode index in the given optab to
4786 initialize.
4787 OPNAME is the generic (string) name of the operation.
4788 SUFFIX is the character which specifies the number of operands for
4789 the given generic operation.
4792 static void
4793 init_libfuncs (optab optable, int first_mode, int last_mode,
4794 const char *opname, int suffix)
4796 int mode;
4797 unsigned opname_len = strlen (opname);
4799 for (mode = first_mode; (int) mode <= (int) last_mode;
4800 mode = (enum machine_mode) ((int) mode + 1))
4802 const char *mname = GET_MODE_NAME (mode);
4803 unsigned mname_len = strlen (mname);
4804 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
4805 char *p;
4806 const char *q;
4808 p = libfunc_name;
4809 *p++ = '_';
4810 *p++ = '_';
4811 for (q = opname; *q; )
4812 *p++ = *q++;
4813 for (q = mname; *q; q++)
4814 *p++ = TOLOWER (*q);
4815 *p++ = suffix;
4816 *p = '\0';
4818 optable->handlers[(int) mode].libfunc
4819 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
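/* For example, for opname "add", suffix '3' and SImode (whose name
   downshifts to "si"), the loop above builds the libcall name "__addsi3",
   the conventional name for a three-operand SImode add routine.  */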
4823 /* Initialize the libfunc fields of an entire group of entries in some
4824 optab which correspond to all integer mode operations. The parameters
4825 have the same meaning as similarly named ones for the `init_libfuncs'
4826 routine. (See above). */
4828 static void
4829 init_integral_libfuncs (optab optable, const char *opname, int suffix)
4831 int maxsize = 2*BITS_PER_WORD;
4832 if (maxsize < LONG_LONG_TYPE_SIZE)
4833 maxsize = LONG_LONG_TYPE_SIZE;
4834 init_libfuncs (optable, word_mode,
4835 mode_for_size (maxsize, MODE_INT, 0),
4836 opname, suffix);
4839 /* Initialize the libfunc fields of an entire group of entries in some
4840 optab which correspond to all real mode operations. The parameters
4841 have the same meaning as similarly named ones for the `init_libfuncs'
4842 routine. (See above). */
4844 static void
4845 init_floating_libfuncs (optab optable, const char *opname, int suffix)
4847 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
4850 /* Initialize the libfunc fields of an entire group of entries of an
4851 inter-mode-class conversion optab. The string formation rules are
4852 similar to the ones for init_libfuncs, above, but instead of having
4853 a mode name and an operand count these functions have two mode names
4854 and no operand count. */
4855 static void
4856 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
4857 enum mode_class from_class,
4858 enum mode_class to_class)
4860 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
4861 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
4862 size_t opname_len = strlen (opname);
4863 size_t max_mname_len = 0;
4865 enum machine_mode fmode, tmode;
4866 const char *fname, *tname;
4867 const char *q;
4868 char *libfunc_name, *suffix;
4869 char *p;
4871 for (fmode = first_from_mode;
4872 fmode != VOIDmode;
4873 fmode = GET_MODE_WIDER_MODE (fmode))
4874 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
4876 for (tmode = first_to_mode;
4877 tmode != VOIDmode;
4878 tmode = GET_MODE_WIDER_MODE (tmode))
4879 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
4881 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4882 libfunc_name[0] = '_';
4883 libfunc_name[1] = '_';
4884 memcpy (&libfunc_name[2], opname, opname_len);
4885 suffix = libfunc_name + opname_len + 2;
4887 for (fmode = first_from_mode; fmode != VOIDmode;
4888 fmode = GET_MODE_WIDER_MODE (fmode))
4889 for (tmode = first_to_mode; tmode != VOIDmode;
4890 tmode = GET_MODE_WIDER_MODE (tmode))
4892 fname = GET_MODE_NAME (fmode);
4893 tname = GET_MODE_NAME (tmode);
4895 p = suffix;
4896 for (q = fname; *q; p++, q++)
4897 *p = TOLOWER (*q);
4898 for (q = tname; *q; p++, q++)
4899 *p = TOLOWER (*q);
4901 *p = '\0';
4903 tab->handlers[tmode][fmode].libfunc
4904 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4905 p - libfunc_name));
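/* For example, init_interclass_conv_libfuncs (sfloat_optab, "float",
   MODE_INT, MODE_FLOAT), as done in init_optabs below, yields names such
   as "__floatsidf" for the SImode-to-DFmode conversion; note the lack of
   a trailing operand count.  */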
4909 /* Initialize the libfunc fields of an entire group of entries of an
4910 intra-mode-class conversion optab. The string formation rules are
4911 similar to the ones for init_libfuncs, above. WIDENING says whether
4912 the optab goes from narrow to wide modes or vice versa. These functions
4913 have two mode names _and_ an operand count. */
4914 static void
4915 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
4916 enum mode_class class, bool widening)
4918 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
4919 size_t opname_len = strlen (opname);
4920 size_t max_mname_len = 0;
4922 enum machine_mode nmode, wmode;
4923 const char *nname, *wname;
4924 const char *q;
4925 char *libfunc_name, *suffix;
4926 char *p;
4928 for (nmode = first_mode; nmode != VOIDmode;
4929 nmode = GET_MODE_WIDER_MODE (nmode))
4930 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
4932 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4933 libfunc_name[0] = '_';
4934 libfunc_name[1] = '_';
4935 memcpy (&libfunc_name[2], opname, opname_len);
4936 suffix = libfunc_name + opname_len + 2;
4938 for (nmode = first_mode; nmode != VOIDmode;
4939 nmode = GET_MODE_WIDER_MODE (nmode))
4940 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
4941 wmode = GET_MODE_WIDER_MODE (wmode))
4943 nname = GET_MODE_NAME (nmode);
4944 wname = GET_MODE_NAME (wmode);
4946 p = suffix;
4947 for (q = widening ? nname : wname; *q; p++, q++)
4948 *p = TOLOWER (*q);
4949 for (q = widening ? wname : nname; *q; p++, q++)
4950 *p = TOLOWER (*q);
4952 *p++ = '2';
4953 *p = '\0';
4955 tab->handlers[widening ? wmode : nmode]
4956 [widening ? nmode : wmode].libfunc
4957 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4958 p - libfunc_name));
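/* For example, with WIDENING true the sext_optab entry for converting
   SFmode to DFmode gets the name "__extendsfdf2", while with WIDENING
   false the trunc_optab entry for DFmode to SFmode gets "__truncdfsf2";
   unlike the inter-class names these keep a trailing '2'.  */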
4964 init_one_libfunc (const char *name)
4966 rtx symbol;
4968 /* Create a FUNCTION_DECL that can be passed to
4969 targetm.encode_section_info. */
4970 /* ??? We don't have any type information except that this is
4971 a function. Pretend this is "int foo()". */
4972 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
4973 build_function_type (integer_type_node, NULL_TREE));
4974 DECL_ARTIFICIAL (decl) = 1;
4975 DECL_EXTERNAL (decl) = 1;
4976 TREE_PUBLIC (decl) = 1;
4978 symbol = XEXP (DECL_RTL (decl), 0);
4980 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4981 are the flags assigned by targetm.encode_section_info. */
4982 SYMBOL_REF_DECL (symbol) = 0;
4984 return symbol;
4987 /* Call this to reset the function entry for one optab (OPTABLE) in mode
4988 MODE to NAME, which should be either 0 or a string constant. */
4989 void
4990 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
4992 if (name)
4993 optable->handlers[mode].libfunc = init_one_libfunc (name);
4994 else
4995 optable->handlers[mode].libfunc = 0;
4998 /* Call this to reset the function entry for one conversion optab
4999 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5000 either 0 or a string constant. */
5001 void
5002 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5003 enum machine_mode fmode, const char *name)
5005 if (name)
5006 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5007 else
5008 optable->handlers[tmode][fmode].libfunc = 0;
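/* Illustrative sketch: these two routines are what targetm.init_libfuncs
   hooks use to override the defaults installed below; an EABI-style port
   might, for instance, do

     set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idiv");

   (the function name here is only an example of such a rename).  */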
5011 /* Call this once to initialize the contents of the optabs
5012 appropriately for the current target machine. */
5014 void
5015 init_optabs (void)
5017 unsigned int i;
5019 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5021 for (i = 0; i < NUM_RTX_CODE; i++)
5022 setcc_gen_code[i] = CODE_FOR_nothing;
5024 #ifdef HAVE_conditional_move
5025 for (i = 0; i < NUM_MACHINE_MODES; i++)
5026 movcc_gen_code[i] = CODE_FOR_nothing;
5027 #endif
5029 for (i = 0; i < NUM_MACHINE_MODES; i++)
5031 vcond_gen_code[i] = CODE_FOR_nothing;
5032 vcondu_gen_code[i] = CODE_FOR_nothing;
5035 add_optab = init_optab (PLUS);
5036 addv_optab = init_optabv (PLUS);
5037 sub_optab = init_optab (MINUS);
5038 subv_optab = init_optabv (MINUS);
5039 smul_optab = init_optab (MULT);
5040 smulv_optab = init_optabv (MULT);
5041 smul_highpart_optab = init_optab (UNKNOWN);
5042 umul_highpart_optab = init_optab (UNKNOWN);
5043 smul_widen_optab = init_optab (UNKNOWN);
5044 umul_widen_optab = init_optab (UNKNOWN);
5045 usmul_widen_optab = init_optab (UNKNOWN);
5046 sdiv_optab = init_optab (DIV);
5047 sdivv_optab = init_optabv (DIV);
5048 sdivmod_optab = init_optab (UNKNOWN);
5049 udiv_optab = init_optab (UDIV);
5050 udivmod_optab = init_optab (UNKNOWN);
5051 smod_optab = init_optab (MOD);
5052 umod_optab = init_optab (UMOD);
5053 fmod_optab = init_optab (UNKNOWN);
5054 drem_optab = init_optab (UNKNOWN);
5055 ftrunc_optab = init_optab (UNKNOWN);
5056 and_optab = init_optab (AND);
5057 ior_optab = init_optab (IOR);
5058 xor_optab = init_optab (XOR);
5059 ashl_optab = init_optab (ASHIFT);
5060 ashr_optab = init_optab (ASHIFTRT);
5061 lshr_optab = init_optab (LSHIFTRT);
5062 rotl_optab = init_optab (ROTATE);
5063 rotr_optab = init_optab (ROTATERT);
5064 smin_optab = init_optab (SMIN);
5065 smax_optab = init_optab (SMAX);
5066 umin_optab = init_optab (UMIN);
5067 umax_optab = init_optab (UMAX);
5068 pow_optab = init_optab (UNKNOWN);
5069 atan2_optab = init_optab (UNKNOWN);
5071 /* These three have codes assigned exclusively for the sake of
5072 have_insn_for. */
5073 mov_optab = init_optab (SET);
5074 movstrict_optab = init_optab (STRICT_LOW_PART);
5075 cmp_optab = init_optab (COMPARE);
5077 ucmp_optab = init_optab (UNKNOWN);
5078 tst_optab = init_optab (UNKNOWN);
5080 eq_optab = init_optab (EQ);
5081 ne_optab = init_optab (NE);
5082 gt_optab = init_optab (GT);
5083 ge_optab = init_optab (GE);
5084 lt_optab = init_optab (LT);
5085 le_optab = init_optab (LE);
5086 unord_optab = init_optab (UNORDERED);
5088 neg_optab = init_optab (NEG);
5089 negv_optab = init_optabv (NEG);
5090 abs_optab = init_optab (ABS);
5091 absv_optab = init_optabv (ABS);
5092 addcc_optab = init_optab (UNKNOWN);
5093 one_cmpl_optab = init_optab (NOT);
5094 ffs_optab = init_optab (FFS);
5095 clz_optab = init_optab (CLZ);
5096 ctz_optab = init_optab (CTZ);
5097 popcount_optab = init_optab (POPCOUNT);
5098 parity_optab = init_optab (PARITY);
5099 sqrt_optab = init_optab (SQRT);
5100 floor_optab = init_optab (UNKNOWN);
5101 lfloor_optab = init_optab (UNKNOWN);
5102 ceil_optab = init_optab (UNKNOWN);
5103 lceil_optab = init_optab (UNKNOWN);
5104 round_optab = init_optab (UNKNOWN);
5105 btrunc_optab = init_optab (UNKNOWN);
5106 nearbyint_optab = init_optab (UNKNOWN);
5107 rint_optab = init_optab (UNKNOWN);
5108 lrint_optab = init_optab (UNKNOWN);
5109 sincos_optab = init_optab (UNKNOWN);
5110 sin_optab = init_optab (UNKNOWN);
5111 asin_optab = init_optab (UNKNOWN);
5112 cos_optab = init_optab (UNKNOWN);
5113 acos_optab = init_optab (UNKNOWN);
5114 exp_optab = init_optab (UNKNOWN);
5115 exp10_optab = init_optab (UNKNOWN);
5116 exp2_optab = init_optab (UNKNOWN);
5117 expm1_optab = init_optab (UNKNOWN);
5118 ldexp_optab = init_optab (UNKNOWN);
5119 logb_optab = init_optab (UNKNOWN);
5120 ilogb_optab = init_optab (UNKNOWN);
5121 log_optab = init_optab (UNKNOWN);
5122 log10_optab = init_optab (UNKNOWN);
5123 log2_optab = init_optab (UNKNOWN);
5124 log1p_optab = init_optab (UNKNOWN);
5125 tan_optab = init_optab (UNKNOWN);
5126 atan_optab = init_optab (UNKNOWN);
5127 copysign_optab = init_optab (UNKNOWN);
5129 strlen_optab = init_optab (UNKNOWN);
5130 cbranch_optab = init_optab (UNKNOWN);
5131 cmov_optab = init_optab (UNKNOWN);
5132 cstore_optab = init_optab (UNKNOWN);
5133 push_optab = init_optab (UNKNOWN);
5135 reduc_smax_optab = init_optab (UNKNOWN);
5136 reduc_umax_optab = init_optab (UNKNOWN);
5137 reduc_smin_optab = init_optab (UNKNOWN);
5138 reduc_umin_optab = init_optab (UNKNOWN);
5139 reduc_splus_optab = init_optab (UNKNOWN);
5140 reduc_uplus_optab = init_optab (UNKNOWN);
5142 vec_extract_optab = init_optab (UNKNOWN);
5143 vec_set_optab = init_optab (UNKNOWN);
5144 vec_init_optab = init_optab (UNKNOWN);
5145 vec_shl_optab = init_optab (UNKNOWN);
5146 vec_shr_optab = init_optab (UNKNOWN);
5147 vec_realign_load_optab = init_optab (UNKNOWN);
5148 movmisalign_optab = init_optab (UNKNOWN);
5150 powi_optab = init_optab (UNKNOWN);
5152 /* Conversions. */
5153 sext_optab = init_convert_optab (SIGN_EXTEND);
5154 zext_optab = init_convert_optab (ZERO_EXTEND);
5155 trunc_optab = init_convert_optab (TRUNCATE);
5156 sfix_optab = init_convert_optab (FIX);
5157 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5158 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5159 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5160 sfloat_optab = init_convert_optab (FLOAT);
5161 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5163 for (i = 0; i < NUM_MACHINE_MODES; i++)
5165 movmem_optab[i] = CODE_FOR_nothing;
5166 cmpstr_optab[i] = CODE_FOR_nothing;
5167 cmpstrn_optab[i] = CODE_FOR_nothing;
5168 cmpmem_optab[i] = CODE_FOR_nothing;
5169 setmem_optab[i] = CODE_FOR_nothing;
5171 sync_add_optab[i] = CODE_FOR_nothing;
5172 sync_sub_optab[i] = CODE_FOR_nothing;
5173 sync_ior_optab[i] = CODE_FOR_nothing;
5174 sync_and_optab[i] = CODE_FOR_nothing;
5175 sync_xor_optab[i] = CODE_FOR_nothing;
5176 sync_nand_optab[i] = CODE_FOR_nothing;
5177 sync_old_add_optab[i] = CODE_FOR_nothing;
5178 sync_old_sub_optab[i] = CODE_FOR_nothing;
5179 sync_old_ior_optab[i] = CODE_FOR_nothing;
5180 sync_old_and_optab[i] = CODE_FOR_nothing;
5181 sync_old_xor_optab[i] = CODE_FOR_nothing;
5182 sync_old_nand_optab[i] = CODE_FOR_nothing;
5183 sync_new_add_optab[i] = CODE_FOR_nothing;
5184 sync_new_sub_optab[i] = CODE_FOR_nothing;
5185 sync_new_ior_optab[i] = CODE_FOR_nothing;
5186 sync_new_and_optab[i] = CODE_FOR_nothing;
5187 sync_new_xor_optab[i] = CODE_FOR_nothing;
5188 sync_new_nand_optab[i] = CODE_FOR_nothing;
5189 sync_compare_and_swap[i] = CODE_FOR_nothing;
5190 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5191 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5192 sync_lock_release[i] = CODE_FOR_nothing;
5194 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5197 /* Fill in the optabs with the insns we support. */
5198 init_all_optabs ();
5200 /* Initialize the optabs with the names of the library functions. */
5201 init_integral_libfuncs (add_optab, "add", '3');
5202 init_floating_libfuncs (add_optab, "add", '3');
5203 init_integral_libfuncs (addv_optab, "addv", '3');
5204 init_floating_libfuncs (addv_optab, "add", '3');
5205 init_integral_libfuncs (sub_optab, "sub", '3');
5206 init_floating_libfuncs (sub_optab, "sub", '3');
5207 init_integral_libfuncs (subv_optab, "subv", '3');
5208 init_floating_libfuncs (subv_optab, "sub", '3');
5209 init_integral_libfuncs (smul_optab, "mul", '3');
5210 init_floating_libfuncs (smul_optab, "mul", '3');
5211 init_integral_libfuncs (smulv_optab, "mulv", '3');
5212 init_floating_libfuncs (smulv_optab, "mul", '3');
5213 init_integral_libfuncs (sdiv_optab, "div", '3');
5214 init_floating_libfuncs (sdiv_optab, "div", '3');
5215 init_integral_libfuncs (sdivv_optab, "divv", '3');
5216 init_integral_libfuncs (udiv_optab, "udiv", '3');
5217 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5218 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5219 init_integral_libfuncs (smod_optab, "mod", '3');
5220 init_integral_libfuncs (umod_optab, "umod", '3');
5221 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5222 init_integral_libfuncs (and_optab, "and", '3');
5223 init_integral_libfuncs (ior_optab, "ior", '3');
5224 init_integral_libfuncs (xor_optab, "xor", '3');
5225 init_integral_libfuncs (ashl_optab, "ashl", '3');
5226 init_integral_libfuncs (ashr_optab, "ashr", '3');
5227 init_integral_libfuncs (lshr_optab, "lshr", '3');
5228 init_integral_libfuncs (smin_optab, "min", '3');
5229 init_floating_libfuncs (smin_optab, "min", '3');
5230 init_integral_libfuncs (smax_optab, "max", '3');
5231 init_floating_libfuncs (smax_optab, "max", '3');
5232 init_integral_libfuncs (umin_optab, "umin", '3');
5233 init_integral_libfuncs (umax_optab, "umax", '3');
5234 init_integral_libfuncs (neg_optab, "neg", '2');
5235 init_floating_libfuncs (neg_optab, "neg", '2');
5236 init_integral_libfuncs (negv_optab, "negv", '2');
5237 init_floating_libfuncs (negv_optab, "neg", '2');
5238 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5239 init_integral_libfuncs (ffs_optab, "ffs", '2');
5240 init_integral_libfuncs (clz_optab, "clz", '2');
5241 init_integral_libfuncs (ctz_optab, "ctz", '2');
5242 init_integral_libfuncs (popcount_optab, "popcount", '2');
5243 init_integral_libfuncs (parity_optab, "parity", '2');
5245 /* Comparison libcalls for integers MUST come in pairs,
5246 signed/unsigned. */
5247 init_integral_libfuncs (cmp_optab, "cmp", '2');
5248 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5249 init_floating_libfuncs (cmp_optab, "cmp", '2');
5251 /* EQ etc are floating point only. */
5252 init_floating_libfuncs (eq_optab, "eq", '2');
5253 init_floating_libfuncs (ne_optab, "ne", '2');
5254 init_floating_libfuncs (gt_optab, "gt", '2');
5255 init_floating_libfuncs (ge_optab, "ge", '2');
5256 init_floating_libfuncs (lt_optab, "lt", '2');
5257 init_floating_libfuncs (le_optab, "le", '2');
5258 init_floating_libfuncs (unord_optab, "unord", '2');
5260 init_floating_libfuncs (powi_optab, "powi", '2');
5262 /* Conversions. */
5263 init_interclass_conv_libfuncs (sfloat_optab, "float",
5264 MODE_INT, MODE_FLOAT);
5265 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5266 MODE_INT, MODE_FLOAT);
5267 init_interclass_conv_libfuncs (sfix_optab, "fix",
5268 MODE_FLOAT, MODE_INT);
5269 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5270 MODE_FLOAT, MODE_INT);
5272 /* sext_optab is also used for FLOAT_EXTEND. */
5273 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5274 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5276 /* Use cabs for double complex abs, since systems generally have cabs.
5277 Don't define any libcall for float complex, so that cabs will be used. */
5278 if (complex_double_type_node)
5279 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5280 = init_one_libfunc ("cabs");
5282 /* The ffs function operates on `int'. */
5283 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5284 = init_one_libfunc ("ffs");
5286 abort_libfunc = init_one_libfunc ("abort");
5287 memcpy_libfunc = init_one_libfunc ("memcpy");
5288 memmove_libfunc = init_one_libfunc ("memmove");
5289 memcmp_libfunc = init_one_libfunc ("memcmp");
5290 memset_libfunc = init_one_libfunc ("memset");
5291 setbits_libfunc = init_one_libfunc ("__setbits");
5293 #ifndef DONT_USE_BUILTIN_SETJMP
5294 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5295 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5296 #else
5297 setjmp_libfunc = init_one_libfunc ("setjmp");
5298 longjmp_libfunc = init_one_libfunc ("longjmp");
5299 #endif
5300 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5301 unwind_sjlj_unregister_libfunc
5302 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5304 /* For function entry/exit instrumentation. */
5305 profile_function_entry_libfunc
5306 = init_one_libfunc ("__cyg_profile_func_enter");
5307 profile_function_exit_libfunc
5308 = init_one_libfunc ("__cyg_profile_func_exit");
5310 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5312 if (HAVE_conditional_trap)
5313 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5315 /* Allow the target to add more libcalls or rename some, etc. */
5316 targetm.init_libfuncs ();
5319 #ifdef DEBUG
5321 /* Print information about the current contents of the optabs on
5322 STDERR. */
5324 static void
5325 debug_optab_libfuncs (void)
5327 int i;
5328 int j;
5329 int k;
5331 /* Dump the arithmetic optabs. */
5332 for (i = 0; i != (int) OTI_MAX; i++)
5333 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5335 optab o;
5336 struct optab_handlers *h;
5338 o = optab_table[i];
5339 h = &o->handlers[j];
5340 if (h->libfunc)
5342 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5343 fprintf (stderr, "%s\t%s:\t%s\n",
5344 GET_RTX_NAME (o->code),
5345 GET_MODE_NAME (j),
5346 XSTR (h->libfunc, 0));
5350 /* Dump the conversion optabs. */
5351 for (i = 0; i < (int) COI_MAX; ++i)
5352 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5353 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5355 convert_optab o;
5356 struct optab_handlers *h;
5358 o = convert_optab_table[i];
5359 h = &o->handlers[j][k];
5360 if (h->libfunc)
5362 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5363 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5364 GET_RTX_NAME (o->code),
5365 GET_MODE_NAME (j),
5366 GET_MODE_NAME (k),
5367 XSTR (h->libfunc, 0));
5372 #endif /* DEBUG */
5375 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5376 CODE. Return 0 on failure. */
5379 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5380 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5382 enum machine_mode mode = GET_MODE (op1);
5383 enum insn_code icode;
5384 rtx insn;
5386 if (!HAVE_conditional_trap)
5387 return 0;
5389 if (mode == VOIDmode)
5390 return 0;
5392 icode = cmp_optab->handlers[(int) mode].insn_code;
5393 if (icode == CODE_FOR_nothing)
5394 return 0;
5396 start_sequence ();
5397 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5398 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5399 if (!op1 || !op2)
5401 end_sequence ();
5402 return 0;
5404 emit_insn (GEN_FCN (icode) (op1, op2));
5406 PUT_CODE (trap_rtx, code);
5407 gcc_assert (HAVE_conditional_trap);
5408 insn = gen_conditional_trap (trap_rtx, tcode);
5409 if (insn)
5411 emit_insn (insn);
5412 insn = get_insns ();
5414 end_sequence ();
5416 return insn;
5419 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5420 or unsigned operation code. */
5422 static enum rtx_code
5423 get_rtx_code (enum tree_code tcode, bool unsignedp)
5425 enum rtx_code code;
5426 switch (tcode)
5428 case EQ_EXPR:
5429 code = EQ;
5430 break;
5431 case NE_EXPR:
5432 code = NE;
5433 break;
5434 case LT_EXPR:
5435 code = unsignedp ? LTU : LT;
5436 break;
5437 case LE_EXPR:
5438 code = unsignedp ? LEU : LE;
5439 break;
5440 case GT_EXPR:
5441 code = unsignedp ? GTU : GT;
5442 break;
5443 case GE_EXPR:
5444 code = unsignedp ? GEU : GE;
5445 break;
5447 case UNORDERED_EXPR:
5448 code = UNORDERED;
5449 break;
5450 case ORDERED_EXPR:
5451 code = ORDERED;
5452 break;
5453 case UNLT_EXPR:
5454 code = UNLT;
5455 break;
5456 case UNLE_EXPR:
5457 code = UNLE;
5458 break;
5459 case UNGT_EXPR:
5460 code = UNGT;
5461 break;
5462 case UNGE_EXPR:
5463 code = UNGE;
5464 break;
5465 case UNEQ_EXPR:
5466 code = UNEQ;
5467 break;
5468 case LTGT_EXPR:
5469 code = LTGT;
5470 break;
5472 default:
5473 gcc_unreachable ();
5475 return code;
5478 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5479 unsigned operators. Do not generate compare instruction. */
5481 static rtx
5482 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5484 enum rtx_code rcode;
5485 tree t_op0, t_op1;
5486 rtx rtx_op0, rtx_op1;
5488 /* This is unlikely. While generating VEC_COND_EXPR, the auto-vectorizer
5489 ensures that the condition is a relational operation. */
5490 gcc_assert (COMPARISON_CLASS_P (cond));
5492 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5493 t_op0 = TREE_OPERAND (cond, 0);
5494 t_op1 = TREE_OPERAND (cond, 1);
5496 /* Expand operands. */
5497 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5498 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
5500 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5501 && GET_MODE (rtx_op0) != VOIDmode)
5502 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5504 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5505 && GET_MODE (rtx_op1) != VOIDmode)
5506 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5508 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5511 /* Return insn code for VEC_COND_EXPR EXPR. */
5513 static inline enum insn_code
5514 get_vcond_icode (tree expr, enum machine_mode mode)
5516 enum insn_code icode = CODE_FOR_nothing;
5518 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5519 icode = vcondu_gen_code[mode];
5520 else
5521 icode = vcond_gen_code[mode];
5522 return icode;
5525 /* Return TRUE iff appropriate vector insns are available
5526 for vector cond expr EXPR in VMODE mode.  */
5528 bool
5529 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5531 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5532 return false;
5533 return true;
5536 /* Generate insns for VEC_COND_EXPR. */
5538 rtx
5539 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5541 enum insn_code icode;
5542 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5543 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5544 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5546 icode = get_vcond_icode (vec_cond_expr, mode);
5547 if (icode == CODE_FOR_nothing)
5548 return 0;
5550 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5551 target = gen_reg_rtx (mode);
5553 /* Get comparison rtx. First expand both cond expr operands. */
5554 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5555 unsignedp, icode);
5556 cc_op0 = XEXP (comparison, 0);
5557 cc_op1 = XEXP (comparison, 1);
5558 /* Expand both operands and force them into registers, if required.  */
5559 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5560 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5561 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5562 && mode != VOIDmode)
5563 rtx_op1 = force_reg (mode, rtx_op1);
5565 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5566 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5567 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5568 && mode != VOIDmode)
5569 rtx_op2 = force_reg (mode, rtx_op2);
5571 /* Emit instruction! */
5572 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5573 comparison, cc_op0, cc_op1));
5575 return target;
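/* An illustrative source-level example (hypothetical, not from this file):
   the auto-vectorizer may turn the loop below into a VEC_COND_EXPR, which
   this routine then expands with the target's vcond<mode> pattern using
   the operand order (target, c, d, a > b, a, b) of the GEN_FCN call above.

       void
       select_values (int *x, const int *a, const int *b,
                      const int *c, const int *d, int n)
       {
         int i;
         for (i = 0; i < n; i++)
           x[i] = a[i] > b[i] ? c[i] : d[i];
       }
*/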
5579 /* This is an internal subroutine of the other compare_and_swap expanders.
5580 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5581 operation. TARGET is an optional place to store the value result of
5582 the operation. ICODE is the particular instruction to expand. Return
5583 the result of the operation. */
5585 static rtx
5586 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5587 rtx target, enum insn_code icode)
5589 enum machine_mode mode = GET_MODE (mem);
5590 rtx insn;
5592 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5593 target = gen_reg_rtx (mode);
5595 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5596 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5597 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5598 old_val = force_reg (mode, old_val);
5600 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5601 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5602 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5603 new_val = force_reg (mode, new_val);
5605 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5606 if (insn == NULL_RTX)
5607 return NULL_RTX;
5608 emit_insn (insn);
5610 return target;
5613 /* Expand a compare-and-swap operation and return its value. */
5615 rtx
5616 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5618 enum machine_mode mode = GET_MODE (mem);
5619 enum insn_code icode = sync_compare_and_swap[mode];
5621 if (icode == CODE_FOR_nothing)
5622 return NULL_RTX;
5624 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
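/* Usage sketch (editorial; the identifiers below are hypothetical): this
   routine is the expander reached from the __sync_val_compare_and_swap
   built-ins, e.g.

       static int
       try_claim (int *slot)
       {
         return __sync_val_compare_and_swap (slot, 0, 1);
       }

   which atomically stores 1 in *slot if it currently holds 0, and returns
   the previous contents of *slot either way.  */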
5627 /* Expand a compare-and-swap operation and store true into the result if
5628 the operation was successful and false otherwise. Return the result.
5629 Unlike other routines, TARGET is not optional. */
5631 rtx
5632 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5634 enum machine_mode mode = GET_MODE (mem);
5635 enum insn_code icode;
5636 rtx subtarget, label0, label1;
5638 /* If the target supports a compare-and-swap pattern that simultaneously
5639 sets some flag for success, then use it. Otherwise use the regular
5640 compare-and-swap and follow that immediately with a compare insn. */
5641 icode = sync_compare_and_swap_cc[mode];
5642 switch (icode)
5644 default:
5645 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5646 NULL_RTX, icode);
5647 if (subtarget != NULL_RTX)
5648 break;
5650 /* FALLTHRU */
5651 case CODE_FOR_nothing:
5652 icode = sync_compare_and_swap[mode];
5653 if (icode == CODE_FOR_nothing)
5654 return NULL_RTX;
5656 /* Ensure that if old_val == mem, we're not comparing
5657 against an old value.  */
5658 if (MEM_P (old_val))
5659 old_val = force_reg (mode, old_val);
5661 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5662 NULL_RTX, icode);
5663 if (subtarget == NULL_RTX)
5664 return NULL_RTX;
5666 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5669 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5670 setcc instruction from the beginning. We don't work too hard here,
5671 but it's nice to not be stupid about initial code gen either. */
5672 if (STORE_FLAG_VALUE == 1)
5674 icode = setcc_gen_code[EQ];
5675 if (icode != CODE_FOR_nothing)
5677 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5678 rtx insn;
5680 subtarget = target;
5681 if (!insn_data[icode].operand[0].predicate (target, cmode))
5682 subtarget = gen_reg_rtx (cmode);
5684 insn = GEN_FCN (icode) (subtarget);
5685 if (insn)
5687 emit_insn (insn);
5688 if (GET_MODE (target) != GET_MODE (subtarget))
5690 convert_move (target, subtarget, 1);
5691 subtarget = target;
5693 return subtarget;
5698 /* Without an appropriate setcc instruction, use a set of branches to
5699 get 1 and 0 stored into target. Presumably if the target has a
5700 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5702 label0 = gen_label_rtx ();
5703 label1 = gen_label_rtx ();
5705 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
5706 emit_move_insn (target, const0_rtx);
5707 emit_jump_insn (gen_jump (label1));
5708 emit_barrier ();
5709 emit_label (label0);
5710 emit_move_insn (target, const1_rtx);
5711 emit_label (label1);
5713 return target;
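/* For orientation (an editorial sketch): the value computed into TARGET
   has the semantics of the following pseudo-C, executed atomically, which
   is what the __sync_bool_compare_and_swap built-ins expect:

       success = (*mem == old_val);
       if (success)
         *mem = new_val;
       return success;
*/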
5716 /* This is a helper function for the other atomic operations. This function
5717 emits a loop that contains SEQ that iterates until a compare-and-swap
5718 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5719 a set of instructions that takes a value from OLD_REG as an input and
5720 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5721 set to the current contents of MEM. After SEQ, a compare-and-swap will
5722 attempt to update MEM with NEW_REG. The function returns true when the
5723 loop was generated successfully. */
5725 static bool
5726 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5728 enum machine_mode mode = GET_MODE (mem);
5729 enum insn_code icode;
5730 rtx label, cmp_reg, subtarget;
5732 /* The loop we want to generate looks like
5734 cmp_reg = mem;
5735 label:
5736 old_reg = cmp_reg;
5737 seq;
5738 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
5739 if (cmp_reg != old_reg)
5740 goto label;
5742 Note that we only do the plain load from memory once. Subsequent
5743 iterations use the value loaded by the compare-and-swap pattern. */
5745 label = gen_label_rtx ();
5746 cmp_reg = gen_reg_rtx (mode);
5748 emit_move_insn (cmp_reg, mem);
5749 emit_label (label);
5750 emit_move_insn (old_reg, cmp_reg);
5751 if (seq)
5752 emit_insn (seq);
5754 /* If the target supports a compare-and-swap pattern that simultaneously
5755 sets some flag for success, then use it. Otherwise use the regular
5756 compare-and-swap and follow that immediately with a compare insn. */
5757 icode = sync_compare_and_swap_cc[mode];
5758 switch (icode)
5760 default:
5761 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5762 cmp_reg, icode);
5763 if (subtarget != NULL_RTX)
5765 gcc_assert (subtarget == cmp_reg);
5766 break;
5769 /* FALLTHRU */
5770 case CODE_FOR_nothing:
5771 icode = sync_compare_and_swap[mode];
5772 if (icode == CODE_FOR_nothing)
5773 return false;
5775 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5776 cmp_reg, icode);
5777 if (subtarget == NULL_RTX)
5778 return false;
5779 if (subtarget != cmp_reg)
5780 emit_move_insn (cmp_reg, subtarget);
5782 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
5785 /* ??? Mark this jump predicted not taken? */
5786 emit_jump_insn (bcc_gen_fctn[NE] (label));
5788 return true;
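/* Example of a caller (editorial sketch, mirroring expand_sync_operation
   below): to emulate an atomic XOR on a target without a sync_xor pattern,
   the helper is used roughly as

       t0 = gen_reg_rtx (mode);
       start_sequence ();
       t1 = expand_simple_binop (mode, XOR, t0, val, NULL_RTX,
                                 true, OPTAB_LIB_WIDEN);
       seq = get_insns ();
       end_sequence ();
       ok = expand_compare_and_swap_loop (mem, t0, t1, seq);

   so the XOR is recomputed and retried until the compare-and-swap
   succeeds.  */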
5791 /* This function generates the atomic operation MEM CODE= VAL. In this
5792 case, we do not care about any resulting value. Returns NULL if we
5793 cannot generate the operation. */
5795 rtx
5796 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
5798 enum machine_mode mode = GET_MODE (mem);
5799 enum insn_code icode;
5800 rtx insn;
5802 /* Look to see if the target supports the operation directly. */
5803 switch (code)
5805 case PLUS:
5806 icode = sync_add_optab[mode];
5807 break;
5808 case IOR:
5809 icode = sync_ior_optab[mode];
5810 break;
5811 case XOR:
5812 icode = sync_xor_optab[mode];
5813 break;
5814 case AND:
5815 icode = sync_and_optab[mode];
5816 break;
5817 case NOT:
5818 icode = sync_nand_optab[mode];
5819 break;
5821 case MINUS:
5822 icode = sync_sub_optab[mode];
5823 if (icode == CODE_FOR_nothing)
5825 icode = sync_add_optab[mode];
5826 if (icode != CODE_FOR_nothing)
5828 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5829 code = PLUS;
5832 break;
5834 default:
5835 gcc_unreachable ();
5838 /* Generate the direct operation, if present. */
5839 if (icode != CODE_FOR_nothing)
5841 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5842 val = convert_modes (mode, GET_MODE (val), val, 1);
5843 if (!insn_data[icode].operand[1].predicate (val, mode))
5844 val = force_reg (mode, val);
5846 insn = GEN_FCN (icode) (mem, val);
5847 if (insn)
5849 emit_insn (insn);
5850 return const0_rtx;
5854 /* Failing that, generate a compare-and-swap loop in which we perform the
5855 operation with normal arithmetic instructions. */
5856 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5858 rtx t0 = gen_reg_rtx (mode), t1;
5860 start_sequence ();
5862 t1 = t0;
5863 if (code == NOT)
5865 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
5866 code = AND;
5868 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
5869 true, OPTAB_LIB_WIDEN);
5871 insn = get_insns ();
5872 end_sequence ();
5874 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
5875 return const0_rtx;
5878 return NULL_RTX;
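/* Usage sketch (editorial): this expander serves the __sync_* built-ins
   when their result is ignored, e.g.

       __sync_fetch_and_add (&counter, 1);

   with the return value unused.  A direct sync_add pattern is preferred;
   otherwise the compare-and-swap loop above provides the atomicity.  */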
5881 /* This function generates the atomic operation MEM CODE= VAL. In this
5882 case, we do care about the resulting value: if AFTER is true then
5883 return the value MEM holds after the operation, if AFTER is false
5884 then return the value MEM holds before the operation. TARGET is an
5885 optional place for the result value to be stored. */
5887 rtx
5888 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
5889 bool after, rtx target)
5891 enum machine_mode mode = GET_MODE (mem);
5892 enum insn_code old_code, new_code, icode;
5893 bool compensate;
5894 rtx insn;
5896 /* Look to see if the target supports the operation directly. */
5897 switch (code)
5899 case PLUS:
5900 old_code = sync_old_add_optab[mode];
5901 new_code = sync_new_add_optab[mode];
5902 break;
5903 case IOR:
5904 old_code = sync_old_ior_optab[mode];
5905 new_code = sync_new_ior_optab[mode];
5906 break;
5907 case XOR:
5908 old_code = sync_old_xor_optab[mode];
5909 new_code = sync_new_xor_optab[mode];
5910 break;
5911 case AND:
5912 old_code = sync_old_and_optab[mode];
5913 new_code = sync_new_and_optab[mode];
5914 break;
5915 case NOT:
5916 old_code = sync_old_nand_optab[mode];
5917 new_code = sync_new_nand_optab[mode];
5918 break;
5920 case MINUS:
5921 old_code = sync_old_sub_optab[mode];
5922 new_code = sync_new_sub_optab[mode];
5923 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
5925 old_code = sync_old_add_optab[mode];
5926 new_code = sync_new_add_optab[mode];
5927 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
5929 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5930 code = PLUS;
5933 break;
5935 default:
5936 gcc_unreachable ();
5939 /* If the target supports the proper new/old operation, great.  But if
5940 we only support the opposite old/new operation, check to see if we
5941 can compensate.  When the old value is supported, we can always
5942 recompute the new value with normal arithmetic.  When only the new
5943 value is supported, we can recover the old value only if the
5944 operation is reversible.  */
5945 compensate = false;
5946 if (after)
5948 icode = new_code;
5949 if (icode == CODE_FOR_nothing)
5951 icode = old_code;
5952 if (icode != CODE_FOR_nothing)
5953 compensate = true;
5956 else
5958 icode = old_code;
5959 if (icode == CODE_FOR_nothing
5960 && (code == PLUS || code == MINUS || code == XOR))
5962 icode = new_code;
5963 if (icode != CODE_FOR_nothing)
5964 compensate = true;
5968 /* If we found something supported, great. */
5969 if (icode != CODE_FOR_nothing)
5971 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5972 target = gen_reg_rtx (mode);
5974 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5975 val = convert_modes (mode, GET_MODE (val), val, 1);
5976 if (!insn_data[icode].operand[2].predicate (val, mode))
5977 val = force_reg (mode, val);
5979 insn = GEN_FCN (icode) (target, mem, val);
5980 if (insn)
5982 emit_insn (insn);
5984 /* If we need to compensate for using an operation with the
5985 wrong return value, do so now. */
5986 if (compensate)
5988 if (!after)
5990 if (code == PLUS)
5991 code = MINUS;
5992 else if (code == MINUS)
5993 code = PLUS;
5996 if (code == NOT)
5997 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
5998 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
5999 true, OPTAB_LIB_WIDEN);
6002 return target;
6006 /* Failing that, generate a compare-and-swap loop in which we perform the
6007 operation with normal arithmetic instructions. */
6008 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6010 rtx t0 = gen_reg_rtx (mode), t1;
6012 if (!target || !register_operand (target, mode))
6013 target = gen_reg_rtx (mode);
6015 start_sequence ();
6017 if (!after)
6018 emit_move_insn (target, t0);
6019 t1 = t0;
6020 if (code == NOT)
6022 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6023 code = AND;
6025 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6026 true, OPTAB_LIB_WIDEN);
6027 if (after)
6028 emit_move_insn (target, t1);
6030 insn = get_insns ();
6031 end_sequence ();
6033 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6034 return target;
6037 return NULL_RTX;
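/* Illustrative mapping (an editorial assumption about the callers in
   builtins.c, given for orientation only):

       __sync_fetch_and_add (&x, n)    AFTER == false, returns the old value
       __sync_add_and_fetch (&x, n)    AFTER == true,  returns the new value

   The compensation step above converts one result into the other, e.g.
   when only the new-value pattern exists but the old value is wanted:

       old = new - n       for PLUS
       old = new + n       for MINUS
       old = new ^ n       for XOR  */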
6040 /* This function expands a test-and-set operation.  Ideally we atomically
6041 store VAL in MEM and return the previous value in MEM.  Some targets
6042 may only support this operation with VAL equal to the constant 1;
6043 in that case the return value will be 0/1, but the exact value
6044 stored in MEM is target defined.  TARGET is an optional place to stick
6045 the return value.  */
6047 rtx
6048 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6050 enum machine_mode mode = GET_MODE (mem);
6051 enum insn_code icode;
6052 rtx insn;
6054 /* If the target supports the test-and-set directly, great. */
6055 icode = sync_lock_test_and_set[mode];
6056 if (icode != CODE_FOR_nothing)
6058 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6059 target = gen_reg_rtx (mode);
6061 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6062 val = convert_modes (mode, GET_MODE (val), val, 1);
6063 if (!insn_data[icode].operand[2].predicate (val, mode))
6064 val = force_reg (mode, val);
6066 insn = GEN_FCN (icode) (target, mem, val);
6067 if (insn)
6069 emit_insn (insn);
6070 return target;
6074 /* Otherwise, use a compare-and-swap loop for the exchange. */
6075 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6077 if (!target || !register_operand (target, mode))
6078 target = gen_reg_rtx (mode);
6079 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6080 val = convert_modes (mode, GET_MODE (val), val, 1);
6081 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6082 return target;
6085 return NULL_RTX;
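/* Usage sketch (editorial; "lock" is a hypothetical variable): the classic
   spin-lock acquire built on the corresponding built-in looks like

       while (__sync_lock_test_and_set (&lock, 1))
         continue;
       ... critical section ...
       __sync_lock_release (&lock);

   Targets that only accept the constant 1 as VAL still support this idiom,
   as described in the comment above.  */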
6088 #include "gt-optabs.h"