[official-gcc.git] / gcc / optabs.c
1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "toplev.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
32 #include "rtl.h"
33 #include "tree.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "function.h"
37 #include "except.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "libfuncs.h"
41 #include "recog.h"
42 #include "reload.h"
43 #include "ggc.h"
44 #include "real.h"
45 #include "basic-block.h"
46 #include "target.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table[OTI_MAX];
59 rtx libfunc_table[LTI_MAX];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[COI_MAX];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
67 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (e.g. for the ARM). */
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
85 #endif
87 /* Indexed by the machine mode, gives the insn code for vector conditional
88 operation. */
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93 /* The insn generating function cannot take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
100 int);
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
105 int *);
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static enum rtx_code get_rtx_code (enum tree_code, bool);
126 static rtx vector_compare_rtx (tree, bool, enum insn_code);
128 #ifndef HAVE_conditional_trap
129 #define HAVE_conditional_trap 0
130 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
131 #endif
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
135 operation).
137 If the last insn does not set TARGET, don't do anything, but return 1.
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
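/* For illustration, a sketch of the note this produces (the register
   numbers are made up): if INSNS ends with

       (set (reg:SI 60) (...))

   and CODE is MULT with OP0 = (reg:SI 58) and OP1 = (reg:SI 59), the last
   insn gains a REG_EQUAL note of the form

       (mult:SI (reg:SI 58) (reg:SI 59))

   so that later passes may treat (reg:SI 60) as equal to that expression.  */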
143 static int
144 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
146 rtx last_insn, insn, set;
147 rtx note;
149 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
151 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
152 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
153 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
154 && GET_RTX_CLASS (code) != RTX_COMPARE
155 && GET_RTX_CLASS (code) != RTX_UNARY)
156 return 1;
158 if (GET_CODE (target) == ZERO_EXTRACT)
159 return 1;
161 for (last_insn = insns;
162 NEXT_INSN (last_insn) != NULL_RTX;
163 last_insn = NEXT_INSN (last_insn))
166 set = single_set (last_insn);
167 if (set == NULL_RTX)
168 return 1;
170 if (! rtx_equal_p (SET_DEST (set), target)
171 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
172 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
173 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
174 return 1;
176 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
177 besides the last insn. */
178 if (reg_overlap_mentioned_p (target, op0)
179 || (op1 && reg_overlap_mentioned_p (target, op1)))
181 insn = PREV_INSN (last_insn);
182 while (insn != NULL_RTX)
184 if (reg_set_p (target, insn))
185 return 0;
187 insn = PREV_INSN (insn);
191 if (GET_RTX_CLASS (code) == RTX_UNARY)
192 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
193 else
194 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
196 set_unique_reg_note (last_insn, REG_EQUAL, note);
198 return 1;
201 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
202 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
203 not actually do a sign-extend or zero-extend, but can leave the
204 higher-order bits of the result rtx undefined, for example, in the case
205 of logical operations, but not right shifts. */
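/* Illustrative example (modes chosen arbitrarily): when widening a QImode
   operand to SImode for a bitwise AND, NO_EXTEND can be nonzero and the
   result may simply be the paradoxical SUBREG

       (subreg:SI (reg:QI 70) 0)

   because the high bits of the AND result are never looked at.  For the
   first operand of a right shift, however, the high bits are shifted into
   the result, so a real sign- or zero-extension is required.  */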
207 static rtx
208 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
209 int unsignedp, int no_extend)
211 rtx result;
213 /* If we don't have to extend and this is a constant, return it. */
214 if (no_extend && GET_MODE (op) == VOIDmode)
215 return op;
217 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
218 extend since it will be more efficient to do so unless the signedness of
219 a promoted object differs from our extension. */
220 if (! no_extend
221 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
222 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
223 return convert_modes (mode, oldmode, op, unsignedp);
225 /* If MODE is no wider than a single word, we return a paradoxical
226 SUBREG. */
227 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
228 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
230 /* Otherwise, get an object of MODE, clobber it, and set the low-order
231 part to OP. */
233 result = gen_reg_rtx (mode);
234 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
235 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
236 return result;
239 /* Return the optab used for computing the operation given by
240 the tree code, CODE. This function is not always usable (for
241 example, it cannot give complete results for multiplication
242 or division) but probably ought to be relied on more widely
243 throughout the expander. */
244 optab
245 optab_for_tree_code (enum tree_code code, tree type)
247 bool trapv;
248 switch (code)
250 case BIT_AND_EXPR:
251 return and_optab;
253 case BIT_IOR_EXPR:
254 return ior_optab;
256 case BIT_NOT_EXPR:
257 return one_cmpl_optab;
259 case BIT_XOR_EXPR:
260 return xor_optab;
262 case TRUNC_MOD_EXPR:
263 case CEIL_MOD_EXPR:
264 case FLOOR_MOD_EXPR:
265 case ROUND_MOD_EXPR:
266 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
268 case RDIV_EXPR:
269 case TRUNC_DIV_EXPR:
270 case CEIL_DIV_EXPR:
271 case FLOOR_DIV_EXPR:
272 case ROUND_DIV_EXPR:
273 case EXACT_DIV_EXPR:
274 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
276 case LSHIFT_EXPR:
277 return ashl_optab;
279 case RSHIFT_EXPR:
280 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
282 case LROTATE_EXPR:
283 return rotl_optab;
285 case RROTATE_EXPR:
286 return rotr_optab;
288 case MAX_EXPR:
289 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
291 case MIN_EXPR:
292 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
294 case REALIGN_LOAD_EXPR:
295 return vec_realign_load_optab;
297 case WIDEN_SUM_EXPR:
298 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
300 case DOT_PROD_EXPR:
301 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
303 case REDUC_MAX_EXPR:
304 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
306 case REDUC_MIN_EXPR:
307 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
309 case REDUC_PLUS_EXPR:
310 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
312 case VEC_LSHIFT_EXPR:
313 return vec_shl_optab;
315 case VEC_RSHIFT_EXPR:
316 return vec_shr_optab;
318 case VEC_WIDEN_MULT_HI_EXPR:
319 return TYPE_UNSIGNED (type) ?
320 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
322 case VEC_WIDEN_MULT_LO_EXPR:
323 return TYPE_UNSIGNED (type) ?
324 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
326 case VEC_UNPACK_HI_EXPR:
327 return TYPE_UNSIGNED (type) ?
328 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
330 case VEC_UNPACK_LO_EXPR:
331 return TYPE_UNSIGNED (type) ?
332 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
334 case VEC_PACK_MOD_EXPR:
335 return vec_pack_mod_optab;
337 case VEC_PACK_SAT_EXPR:
338 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
340 default:
341 break;
344 trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
345 switch (code)
347 case PLUS_EXPR:
348 return trapv ? addv_optab : add_optab;
350 case MINUS_EXPR:
351 return trapv ? subv_optab : sub_optab;
353 case MULT_EXPR:
354 return trapv ? smulv_optab : smul_optab;
356 case NEGATE_EXPR:
357 return trapv ? negv_optab : neg_optab;
359 case ABS_EXPR:
360 return trapv ? absv_optab : abs_optab;
362 case VEC_EXTRACT_EVEN_EXPR:
363 return vec_extract_even_optab;
365 case VEC_EXTRACT_ODD_EXPR:
366 return vec_extract_odd_optab;
368 case VEC_INTERLEAVE_HIGH_EXPR:
369 return vec_interleave_high_optab;
371 case VEC_INTERLEAVE_LOW_EXPR:
372 return vec_interleave_low_optab;
374 default:
375 return NULL;
380 /* Expand vector widening operations.
382 There are two different classes of operations handled here:
383 1) Operations whose result is wider than all the arguments to the operation.
384 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
385 In this case OP0 and optionally OP1 would be initialized,
386 but WIDE_OP wouldn't (not relevant for this case).
387 2) Operations whose result is of the same size as the last argument to the
388 operation, but wider than all the other arguments to the operation.
389 Examples: WIDEN_SUM_EXPR, DOT_PROD_EXPR.
390 In this case WIDE_OP and OP0, and optionally OP1, would be initialized.
392 E.g., when called to expand the following operations, this is how
393 the arguments will be initialized:
394                                nops    OP0     OP1     WIDE_OP
395   widening-sum                 2       oprnd0  -       oprnd1
396   widening-dot-product         3       oprnd0  oprnd1  oprnd2
397   widening-mult                2       oprnd0  oprnd1  -
398   type-promotion (vec-unpack)  1       oprnd0  -       -      */
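/* As a concrete scalar reading of the widening-dot-product row above
   (a sketch of the semantics only), DOT_PROD_EXPR <oprnd0, oprnd1, oprnd2>
   computes roughly

       wide_sum = oprnd2;
       for each vector element i:
         wide_sum += (wider type) oprnd0[i] * (wider type) oprnd1[i];

   so OP0/OP1 carry the narrow vectors and WIDE_OP carries the wide
   accumulator.  */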
401 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
402 int unsignedp)
404 tree oprnd0, oprnd1, oprnd2;
405 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
406 optab widen_pattern_optab;
407 int icode;
408 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
409 rtx temp;
410 rtx pat;
411 rtx xop0, xop1, wxop;
412 int nops = TREE_CODE_LENGTH (TREE_CODE (exp));
414 oprnd0 = TREE_OPERAND (exp, 0);
415 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
416 widen_pattern_optab =
417 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
418 icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
419 gcc_assert (icode != CODE_FOR_nothing);
420 xmode0 = insn_data[icode].operand[1].mode;
422 if (nops >= 2)
424 oprnd1 = TREE_OPERAND (exp, 1);
425 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
426 xmode1 = insn_data[icode].operand[2].mode;
429 /* The last operand is of a wider mode than the rest of the operands. */
430 if (nops == 2)
432 wmode = tmode1;
433 wxmode = xmode1;
435 else if (nops == 3)
437 gcc_assert (tmode1 == tmode0);
438 gcc_assert (op1);
439 oprnd2 = TREE_OPERAND (exp, 2);
440 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
441 wxmode = insn_data[icode].operand[3].mode;
444 if (!wide_op)
445 wmode = wxmode = insn_data[icode].operand[0].mode;
447 if (!target
448 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
449 temp = gen_reg_rtx (wmode);
450 else
451 temp = target;
453 xop0 = op0;
454 xop1 = op1;
455 wxop = wide_op;
457 /* In case the insn wants input operands in modes different from
458 those of the actual operands, convert the operands. It would
459 seem that we don't need to convert CONST_INTs, but we do, so
460 that they're properly zero-extended, sign-extended or truncated
461 for their mode. */
463 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
464 xop0 = convert_modes (xmode0,
465 GET_MODE (op0) != VOIDmode
466 ? GET_MODE (op0)
467 : tmode0,
468 xop0, unsignedp);
470 if (op1)
471 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
472 xop1 = convert_modes (xmode1,
473 GET_MODE (op1) != VOIDmode
474 ? GET_MODE (op1)
475 : tmode1,
476 xop1, unsignedp);
478 if (wide_op)
479 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
480 wxop = convert_modes (wxmode,
481 GET_MODE (wide_op) != VOIDmode
482 ? GET_MODE (wide_op)
483 : wmode,
484 wxop, unsignedp);
486 /* Now, if insn's predicates don't allow our operands, put them into
487 pseudo regs. */
489 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
490 && xmode0 != VOIDmode)
491 xop0 = copy_to_mode_reg (xmode0, xop0);
493 if (op1)
495 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
496 && xmode1 != VOIDmode)
497 xop1 = copy_to_mode_reg (xmode1, xop1);
499 if (wide_op)
501 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
502 && wxmode != VOIDmode)
503 wxop = copy_to_mode_reg (wxmode, wxop);
505 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
507 else
508 pat = GEN_FCN (icode) (temp, xop0, xop1);
510 else
512 if (wide_op)
514 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
515 && wxmode != VOIDmode)
516 wxop = copy_to_mode_reg (wxmode, wxop);
518 pat = GEN_FCN (icode) (temp, xop0, wxop);
520 else
521 pat = GEN_FCN (icode) (temp, xop0);
524 emit_insn (pat);
525 return temp;
528 /* Generate code to perform an operation specified by TERNARY_OPTAB
529 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
531 UNSIGNEDP is for the case where we have to widen the operands
532 to perform the operation. It says to use zero-extension.
534 If TARGET is nonzero, the value
535 is generated there, if it is convenient to do so.
536 In all cases an rtx is returned for the locus of the value;
537 this may or may not be TARGET. */
540 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
541 rtx op1, rtx op2, rtx target, int unsignedp)
543 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
544 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
545 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
546 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
547 rtx temp;
548 rtx pat;
549 rtx xop0 = op0, xop1 = op1, xop2 = op2;
551 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
552 != CODE_FOR_nothing);
554 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
555 temp = gen_reg_rtx (mode);
556 else
557 temp = target;
559 /* In case the insn wants input operands in modes different from
560 those of the actual operands, convert the operands. It would
561 seem that we don't need to convert CONST_INTs, but we do, so
562 that they're properly zero-extended, sign-extended or truncated
563 for their mode. */
565 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
566 xop0 = convert_modes (mode0,
567 GET_MODE (op0) != VOIDmode
568 ? GET_MODE (op0)
569 : mode,
570 xop0, unsignedp);
572 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
573 xop1 = convert_modes (mode1,
574 GET_MODE (op1) != VOIDmode
575 ? GET_MODE (op1)
576 : mode,
577 xop1, unsignedp);
579 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
580 xop2 = convert_modes (mode2,
581 GET_MODE (op2) != VOIDmode
582 ? GET_MODE (op2)
583 : mode,
584 xop2, unsignedp);
586 /* Now, if insn's predicates don't allow our operands, put them into
587 pseudo regs. */
589 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
590 && mode0 != VOIDmode)
591 xop0 = copy_to_mode_reg (mode0, xop0);
593 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
594 && mode1 != VOIDmode)
595 xop1 = copy_to_mode_reg (mode1, xop1);
597 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
598 && mode2 != VOIDmode)
599 xop2 = copy_to_mode_reg (mode2, xop2);
601 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
603 emit_insn (pat);
604 return temp;
608 /* Like expand_binop, but return a constant rtx if the result can be
609 calculated at compile time. The arguments and return value are
610 otherwise the same as for expand_binop. */
612 static rtx
613 simplify_expand_binop (enum machine_mode mode, optab binoptab,
614 rtx op0, rtx op1, rtx target, int unsignedp,
615 enum optab_methods methods)
617 if (CONSTANT_P (op0) && CONSTANT_P (op1))
619 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
621 if (x)
622 return x;
625 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
628 /* Like simplify_expand_binop, but always put the result in TARGET.
629 Return true if the expansion succeeded. */
631 bool
632 force_expand_binop (enum machine_mode mode, optab binoptab,
633 rtx op0, rtx op1, rtx target, int unsignedp,
634 enum optab_methods methods)
636 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
637 target, unsignedp, methods);
638 if (x == 0)
639 return false;
640 if (x != target)
641 emit_move_insn (target, x);
642 return true;
645 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
648 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
650 enum insn_code icode;
651 rtx rtx_op1, rtx_op2;
652 enum machine_mode mode1;
653 enum machine_mode mode2;
654 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
655 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
656 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
657 optab shift_optab;
658 rtx pat;
660 switch (TREE_CODE (vec_shift_expr))
662 case VEC_RSHIFT_EXPR:
663 shift_optab = vec_shr_optab;
664 break;
665 case VEC_LSHIFT_EXPR:
666 shift_optab = vec_shl_optab;
667 break;
668 default:
669 gcc_unreachable ();
672 icode = (int) shift_optab->handlers[(int) mode].insn_code;
673 gcc_assert (icode != CODE_FOR_nothing);
675 mode1 = insn_data[icode].operand[1].mode;
676 mode2 = insn_data[icode].operand[2].mode;
678 rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
679 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
680 && mode1 != VOIDmode)
681 rtx_op1 = force_reg (mode1, rtx_op1);
683 rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
684 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
685 && mode2 != VOIDmode)
686 rtx_op2 = force_reg (mode2, rtx_op2);
688 if (!target
689 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
690 target = gen_reg_rtx (mode);
692 /* Emit instruction */
693 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
694 gcc_assert (pat);
695 emit_insn (pat);
697 return target;
700 /* This subroutine of expand_doubleword_shift handles the cases in which
701 the effective shift value is >= BITS_PER_WORD. The arguments and return
702 value are the same as for the parent routine, except that SUPERWORD_OP1
703 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
704 INTO_TARGET may be null if the caller has decided to calculate it. */
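/* A small sketch of the superword case, assuming 32-bit words and a right
   shift (so bits move from the OUTOF half towards the INTO half):

       into_target  = outof_input >> superword_op1;
                          (SUPERWORD_OP1 is typically OP1 - 32)
       outof_target = signed shift ? outof_input >> 31 : 0;
                          (an arithmetic shift by BITS_PER_WORD - 1)

   which matches the pair of word-mode operations emitted below.  */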
706 static bool
707 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
708 rtx outof_target, rtx into_target,
709 int unsignedp, enum optab_methods methods)
711 if (into_target != 0)
712 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
713 into_target, unsignedp, methods))
714 return false;
716 if (outof_target != 0)
718 /* For a signed right shift, we must fill OUTOF_TARGET with copies
719 of the sign bit, otherwise we must fill it with zeros. */
720 if (binoptab != ashr_optab)
721 emit_move_insn (outof_target, CONST0_RTX (word_mode));
722 else
723 if (!force_expand_binop (word_mode, binoptab,
724 outof_input, GEN_INT (BITS_PER_WORD - 1),
725 outof_target, unsignedp, methods))
726 return false;
728 return true;
731 /* This subroutine of expand_doubleword_shift handles the cases in which
732 the effective shift value is < BITS_PER_WORD. The arguments and return
733 value are the same as for the parent routine. */
735 static bool
736 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
737 rtx outof_input, rtx into_input, rtx op1,
738 rtx outof_target, rtx into_target,
739 int unsignedp, enum optab_methods methods,
740 unsigned HOST_WIDE_INT shift_mask)
742 optab reverse_unsigned_shift, unsigned_shift;
743 rtx tmp, carries;
745 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
746 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
748 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
749 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
750 the opposite direction to BINOPTAB. */
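/* Illustrative reading for a left shift with 32-bit words: the carried-over
   bits are outof_input >> (32 - op1).  The non-constant case below computes
   the same value as

       carries = (outof_input >> 1) >> ((32 - 1) - op1);

   so that no single word-mode shift count can reach BITS_PER_WORD.  */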
751 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
753 carries = outof_input;
754 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
755 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
756 0, true, methods);
758 else
760 /* We must avoid shifting by BITS_PER_WORD bits since that is either
761 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
762 has unknown behavior. Do a single shift first, then shift by the
763 remainder. It's OK to use ~OP1 as the remainder if shift counts
764 are truncated to the mode size. */
765 carries = expand_binop (word_mode, reverse_unsigned_shift,
766 outof_input, const1_rtx, 0, unsignedp, methods);
767 if (shift_mask == BITS_PER_WORD - 1)
769 tmp = immed_double_const (-1, -1, op1_mode);
770 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
771 0, true, methods);
773 else
775 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
776 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
777 0, true, methods);
780 if (tmp == 0 || carries == 0)
781 return false;
782 carries = expand_binop (word_mode, reverse_unsigned_shift,
783 carries, tmp, 0, unsignedp, methods);
784 if (carries == 0)
785 return false;
787 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
788 so the result can go directly into INTO_TARGET if convenient. */
789 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
790 into_target, unsignedp, methods);
791 if (tmp == 0)
792 return false;
794 /* Now OR in the bits carried over from OUTOF_INPUT. */
795 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
796 into_target, unsignedp, methods))
797 return false;
799 /* Use a standard word_mode shift for the out-of half. */
800 if (outof_target != 0)
801 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
802 outof_target, unsignedp, methods))
803 return false;
805 return true;
809 #ifdef HAVE_conditional_move
810 /* Try implementing expand_doubleword_shift using conditional moves.
811 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
812 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
813 are the shift counts to use in the former and latter case. All other
814 arguments are the same as the parent routine. */
816 static bool
817 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
818 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
819 rtx outof_input, rtx into_input,
820 rtx subword_op1, rtx superword_op1,
821 rtx outof_target, rtx into_target,
822 int unsignedp, enum optab_methods methods,
823 unsigned HOST_WIDE_INT shift_mask)
825 rtx outof_superword, into_superword;
827 /* Put the superword version of the output into OUTOF_SUPERWORD and
828 INTO_SUPERWORD. */
829 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
830 if (outof_target != 0 && subword_op1 == superword_op1)
832 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
833 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
834 into_superword = outof_target;
835 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
836 outof_superword, 0, unsignedp, methods))
837 return false;
839 else
841 into_superword = gen_reg_rtx (word_mode);
842 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
843 outof_superword, into_superword,
844 unsignedp, methods))
845 return false;
848 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
849 if (!expand_subword_shift (op1_mode, binoptab,
850 outof_input, into_input, subword_op1,
851 outof_target, into_target,
852 unsignedp, methods, shift_mask))
853 return false;
855 /* Select between them. Do the INTO half first because INTO_SUPERWORD
856 might be the current value of OUTOF_TARGET. */
857 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
858 into_target, into_superword, word_mode, false))
859 return false;
861 if (outof_target != 0)
862 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
863 outof_target, outof_superword,
864 word_mode, false))
865 return false;
867 return true;
869 #endif
871 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
872 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
873 input operand; the shift moves bits in the direction OUTOF_INPUT->
874 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
875 of the target. OP1 is the shift count and OP1_MODE is its mode.
876 If OP1 is constant, it will have been truncated as appropriate
877 and is known to be nonzero.
879 If SHIFT_MASK is zero, the result of word shifts is undefined when the
880 shift count is outside the range [0, BITS_PER_WORD). This routine must
881 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
883 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
884 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
885 fill with zeros or sign bits as appropriate.
887 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
888 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
889 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
890 In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
891 are undefined.
893 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
894 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
895 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
896 function wants to calculate it itself.
898 Return true if the shift could be successfully synthesized. */
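/* A minimal sketch of the decomposition performed here, assuming 32-bit
   words and a left shift (OUTOF is then the low input word and INTO the
   high result word); it ignores the SHIFT_MASK subtleties handled below:

       superword case (count >= 32):
         into  = outof_in << (count - 32);
         outof = 0;

       subword case (0 < count < 32):
         into  = (into_in << count) | (outof_in >> (32 - count));
         outof = outof_in << count;

   The code below emits both alternatives and selects between them with
   conditional moves or, failing that, a branch.  */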
900 static bool
901 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
902 rtx outof_input, rtx into_input, rtx op1,
903 rtx outof_target, rtx into_target,
904 int unsignedp, enum optab_methods methods,
905 unsigned HOST_WIDE_INT shift_mask)
907 rtx superword_op1, tmp, cmp1, cmp2;
908 rtx subword_label, done_label;
909 enum rtx_code cmp_code;
911 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
912 fill the result with sign or zero bits as appropriate. If so, the value
913 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
914 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
915 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
917 This isn't worthwhile for constant shifts since the optimizers will
918 cope better with in-range shift counts. */
919 if (shift_mask >= BITS_PER_WORD
920 && outof_target != 0
921 && !CONSTANT_P (op1))
923 if (!expand_doubleword_shift (op1_mode, binoptab,
924 outof_input, into_input, op1,
925 0, into_target,
926 unsignedp, methods, shift_mask))
927 return false;
928 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
929 outof_target, unsignedp, methods))
930 return false;
931 return true;
934 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
935 is true when the effective shift value is less than BITS_PER_WORD.
936 Set SUPERWORD_OP1 to the shift count that should be used to shift
937 OUTOF_INPUT into INTO_TARGET when the condition is false. */
938 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
939 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
941 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
942 is a subword shift count. */
943 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
944 0, true, methods);
945 cmp2 = CONST0_RTX (op1_mode);
946 cmp_code = EQ;
947 superword_op1 = op1;
949 else
951 /* Set CMP1 to OP1 - BITS_PER_WORD. */
952 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
953 0, true, methods);
954 cmp2 = CONST0_RTX (op1_mode);
955 cmp_code = LT;
956 superword_op1 = cmp1;
958 if (cmp1 == 0)
959 return false;
961 /* If we can compute the condition at compile time, pick the
962 appropriate subroutine. */
963 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
964 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
966 if (tmp == const0_rtx)
967 return expand_superword_shift (binoptab, outof_input, superword_op1,
968 outof_target, into_target,
969 unsignedp, methods);
970 else
971 return expand_subword_shift (op1_mode, binoptab,
972 outof_input, into_input, op1,
973 outof_target, into_target,
974 unsignedp, methods, shift_mask);
977 #ifdef HAVE_conditional_move
978 /* Try using conditional moves to generate straight-line code. */
980 rtx start = get_last_insn ();
981 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
982 cmp_code, cmp1, cmp2,
983 outof_input, into_input,
984 op1, superword_op1,
985 outof_target, into_target,
986 unsignedp, methods, shift_mask))
987 return true;
988 delete_insns_since (start);
990 #endif
992 /* As a last resort, use branches to select the correct alternative. */
993 subword_label = gen_label_rtx ();
994 done_label = gen_label_rtx ();
996 NO_DEFER_POP;
997 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
998 0, 0, subword_label);
999 OK_DEFER_POP;
1001 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1002 outof_target, into_target,
1003 unsignedp, methods))
1004 return false;
1006 emit_jump_insn (gen_jump (done_label));
1007 emit_barrier ();
1008 emit_label (subword_label);
1010 if (!expand_subword_shift (op1_mode, binoptab,
1011 outof_input, into_input, op1,
1012 outof_target, into_target,
1013 unsignedp, methods, shift_mask))
1014 return false;
1016 emit_label (done_label);
1017 return true;
1020 /* Subroutine of expand_binop. Perform a double word multiplication of
1021 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1022 as the target's word_mode. This function returns NULL_RTX if anything
1023 goes wrong, in which case it may have already emitted instructions
1024 which need to be deleted.
1026 If we want to multiply two two-word values and have normal and widening
1027 multiplies of single-word values, we can do this with three smaller
1028 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1029 because we are not operating on one word at a time.
1031 The multiplication proceeds as follows:
1032                                      _______________________
1033                                     [__op0_high_|__op0_low__]
1034                                      _______________________
1035                                 *   [__op1_high_|__op1_low__]
1036              _______________________________________________
1037                                      _______________________
1038     (1)                             [__op0_low__*__op1_low__]
1039                          _______________________
1040     (2a)                [__op0_low__*__op1_high_]
1041                          _______________________
1042     (2b)                [__op0_high_*__op1_low__]
1043              _______________________
1044     (3)     [__op0_high_*__op1_high_]
1047 This gives a 4-word result. Since we are only interested in the
1048 lower 2 words, partial result (3) and the upper words of (2a) and
1049 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1050 calculated using non-widening multiplication.
1052 (1), however, needs to be calculated with an unsigned widening
1053 multiplication. If this operation is not directly supported we
1054 try using a signed widening multiplication and adjust the result.
1055 This adjustment works as follows:
1057 If both operands are positive then no adjustment is needed.
1059 If the operands have different signs, for example op0_low < 0 and
1060 op1_low >= 0, the instruction treats the most significant bit of
1061 op0_low as a sign bit instead of a bit with significance
1062 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1063 with 2**BITS_PER_WORD - op0_low, and two's complements the
1064 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1065 the result.
1067 Similarly, if both operands are negative, we need to add
1068 (op0_low + op1_low) * 2**BITS_PER_WORD.
1070 We use a trick to adjust quickly. We logically shift op0_low right
1071 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1072 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1073 logical shift exists, we do an arithmetic right shift and subtract
1074 the 0 or -1. */
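/* A compact C model of the scheme above, assuming 32-bit words and an
   unsigned widening multiply (variable names are illustrative):

       uint64_t product;
       uint32_t adjust;

       product  = (uint64_t) op0_low * op1_low;      <- (1), widening
       adjust   = op0_high * op1_low;                <- (2b), non-widening
       adjust  += op0_low  * op1_high;               <- (2a), non-widening
       product += (uint64_t) adjust << 32;

   which yields the low two words of the full four-word product.  When only
   a signed widening multiply exists, (1) is computed signed and op0_high /
   op1_high are pre-adjusted as described above so that the final sum is
   still the unsigned product.  */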
1076 static rtx
1077 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1078 bool umulp, enum optab_methods methods)
1080 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1081 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1082 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1083 rtx product, adjust, product_high, temp;
1085 rtx op0_high = operand_subword_force (op0, high, mode);
1086 rtx op0_low = operand_subword_force (op0, low, mode);
1087 rtx op1_high = operand_subword_force (op1, high, mode);
1088 rtx op1_low = operand_subword_force (op1, low, mode);
1090 /* If we're using an unsigned multiply to directly compute the product
1091 of the low-order words of the operands and perform any required
1092 adjustments of the operands, we begin by trying two more multiplications
1093 and then computing the appropriate sum.
1095 We have checked above that the required addition is provided.
1096 Full-word addition will normally always succeed, especially if
1097 it is provided at all, so we don't worry about its failure. The
1098 multiplication may well fail, however, so we do handle that. */
1100 if (!umulp)
1102 /* ??? This could be done with emit_store_flag where available. */
1103 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1104 NULL_RTX, 1, methods);
1105 if (temp)
1106 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1107 NULL_RTX, 0, OPTAB_DIRECT);
1108 else
1110 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1111 NULL_RTX, 0, methods);
1112 if (!temp)
1113 return NULL_RTX;
1114 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1115 NULL_RTX, 0, OPTAB_DIRECT);
1118 if (!op0_high)
1119 return NULL_RTX;
1122 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1123 NULL_RTX, 0, OPTAB_DIRECT);
1124 if (!adjust)
1125 return NULL_RTX;
1127 /* OP0_HIGH should now be dead. */
1129 if (!umulp)
1131 /* ??? This could be done with emit_store_flag where available. */
1132 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1133 NULL_RTX, 1, methods);
1134 if (temp)
1135 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1136 NULL_RTX, 0, OPTAB_DIRECT);
1137 else
1139 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1140 NULL_RTX, 0, methods);
1141 if (!temp)
1142 return NULL_RTX;
1143 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1144 NULL_RTX, 0, OPTAB_DIRECT);
1147 if (!op1_high)
1148 return NULL_RTX;
1151 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1152 NULL_RTX, 0, OPTAB_DIRECT);
1153 if (!temp)
1154 return NULL_RTX;
1156 /* OP1_HIGH should now be dead. */
1158 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1159 adjust, 0, OPTAB_DIRECT);
1161 if (target && !REG_P (target))
1162 target = NULL_RTX;
1164 if (umulp)
1165 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1166 target, 1, OPTAB_DIRECT);
1167 else
1168 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1169 target, 1, OPTAB_DIRECT);
1171 if (!product)
1172 return NULL_RTX;
1174 product_high = operand_subword (product, high, 1, mode);
1175 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1176 REG_P (product_high) ? product_high : adjust,
1177 0, OPTAB_DIRECT);
1178 emit_move_insn (product_high, adjust);
1179 return product;
1182 /* Wrapper around expand_binop which takes an rtx code to specify
1183 the operation to perform, not an optab pointer. All other
1184 arguments are the same. */
1186 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1187 rtx op1, rtx target, int unsignedp,
1188 enum optab_methods methods)
1190 optab binop = code_to_optab[(int) code];
1191 gcc_assert (binop);
1193 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1196 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1197 binop. Order them according to commutative_operand_precedence and, if
1198 possible, try to put TARGET or a pseudo first. */
1199 static bool
1200 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1202 int op0_prec = commutative_operand_precedence (op0);
1203 int op1_prec = commutative_operand_precedence (op1);
1205 if (op0_prec < op1_prec)
1206 return true;
1208 if (op0_prec > op1_prec)
1209 return false;
1211 /* With equal precedence, both orders are ok, but it is better if the
1212 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1213 if (target == 0 || REG_P (target))
1214 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1215 else
1216 return rtx_equal_p (op1, target);
1220 /* Generate code to perform an operation specified by BINOPTAB
1221 on operands OP0 and OP1, with result having machine-mode MODE.
1223 UNSIGNEDP is for the case where we have to widen the operands
1224 to perform the operation. It says to use zero-extension.
1226 If TARGET is nonzero, the value
1227 is generated there, if it is convenient to do so.
1228 In all cases an rtx is returned for the locus of the value;
1229 this may or may not be TARGET. */
1232 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1233 rtx target, int unsignedp, enum optab_methods methods)
1235 enum optab_methods next_methods
1236 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1237 ? OPTAB_WIDEN : methods);
1238 enum mode_class class;
1239 enum machine_mode wider_mode;
1240 rtx temp;
1241 int commutative_op = 0;
1242 int shift_op = (binoptab->code == ASHIFT
1243 || binoptab->code == ASHIFTRT
1244 || binoptab->code == LSHIFTRT
1245 || binoptab->code == ROTATE
1246 || binoptab->code == ROTATERT);
1247 rtx entry_last = get_last_insn ();
1248 rtx last;
1249 bool first_pass_p = true;
1251 class = GET_MODE_CLASS (mode);
1253 /* If subtracting an integer constant, convert this into an addition of
1254 the negated constant. */
1256 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1258 op1 = negate_rtx (mode, op1);
1259 binoptab = add_optab;
1262 /* If we are inside an appropriately-short loop and we are optimizing,
1263 force expensive constants into a register. */
1264 if (CONSTANT_P (op0) && optimize
1265 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1267 if (GET_MODE (op0) != VOIDmode)
1268 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1269 op0 = force_reg (mode, op0);
1272 if (CONSTANT_P (op1) && optimize
1273 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1275 if (GET_MODE (op1) != VOIDmode)
1276 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1277 op1 = force_reg (mode, op1);
1280 /* Record where to delete back to if we backtrack. */
1281 last = get_last_insn ();
1283 /* If operation is commutative,
1284 try to make the first operand a register.
1285 Even better, try to make it the same as the target.
1286 Also try to make the last operand a constant. */
1287 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1288 || binoptab == smul_widen_optab
1289 || binoptab == umul_widen_optab
1290 || binoptab == smul_highpart_optab
1291 || binoptab == umul_highpart_optab)
1293 commutative_op = 1;
1295 if (swap_commutative_operands_with_target (target, op0, op1))
1297 temp = op1;
1298 op1 = op0;
1299 op0 = temp;
1303 retry:
1305 /* If we can do it with a three-operand insn, do so. */
1307 if (methods != OPTAB_MUST_WIDEN
1308 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1310 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1311 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1312 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1313 enum machine_mode tmp_mode;
1314 rtx pat;
1315 rtx xop0 = op0, xop1 = op1;
1317 if (target)
1318 temp = target;
1319 else
1320 temp = gen_reg_rtx (mode);
1322 /* If it is a commutative operator and the modes would match
1323 if we would swap the operands, we can save the conversions. */
1324 if (commutative_op)
1326 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1327 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1329 rtx tmp;
1331 tmp = op0; op0 = op1; op1 = tmp;
1332 tmp = xop0; xop0 = xop1; xop1 = tmp;
1336 /* In case the insn wants input operands in modes different from
1337 those of the actual operands, convert the operands. It would
1338 seem that we don't need to convert CONST_INTs, but we do, so
1339 that they're properly zero-extended, sign-extended or truncated
1340 for their mode. */
1342 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1343 xop0 = convert_modes (mode0,
1344 GET_MODE (op0) != VOIDmode
1345 ? GET_MODE (op0)
1346 : mode,
1347 xop0, unsignedp);
1349 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1350 xop1 = convert_modes (mode1,
1351 GET_MODE (op1) != VOIDmode
1352 ? GET_MODE (op1)
1353 : mode,
1354 xop1, unsignedp);
1356 /* Now, if insn's predicates don't allow our operands, put them into
1357 pseudo regs. */
1359 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1360 && mode0 != VOIDmode)
1361 xop0 = copy_to_mode_reg (mode0, xop0);
1363 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1364 && mode1 != VOIDmode)
1365 xop1 = copy_to_mode_reg (mode1, xop1);
1367 if (binoptab == vec_pack_mod_optab
1368 || binoptab == vec_pack_usat_optab
1369 || binoptab == vec_pack_ssat_optab)
1371 /* The mode of the result is different from the mode of the
1372 arguments. */
1373 tmp_mode = insn_data[icode].operand[0].mode;
1374 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1375 return 0;
1377 else
1378 tmp_mode = mode;
1380 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1381 temp = gen_reg_rtx (tmp_mode);
1383 pat = GEN_FCN (icode) (temp, xop0, xop1);
1384 if (pat)
1386 /* If PAT is composed of more than one insn, try to add an appropriate
1387 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1388 operand, call ourselves again, this time without a target. */
1389 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1390 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1392 delete_insns_since (last);
1393 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1394 unsignedp, methods);
1397 emit_insn (pat);
1398 return temp;
1400 else
1401 delete_insns_since (last);
1404 /* If we were trying to rotate by a constant value, and that didn't
1405 work, try rotating the other direction before falling back to
1406 shifts and bitwise-or. */
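/* (For instance, in SImode a left rotate by 3 is the same operation as a
   right rotate by 32 - 3 = 29, which is what the GEN_INT computation below
   produces; the example values are illustrative.)  */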
1407 if (first_pass_p
1408 && (binoptab == rotl_optab || binoptab == rotr_optab)
1409 && class == MODE_INT
1410 && GET_CODE (op1) == CONST_INT
1411 && INTVAL (op1) > 0
1412 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1414 first_pass_p = false;
1415 op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
1416 binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
1417 goto retry;
1420 /* If this is a multiply, see if we can do a widening operation that
1421 takes operands of this mode and makes a wider mode. */
1423 if (binoptab == smul_optab
1424 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1425 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1426 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1427 != CODE_FOR_nothing))
1429 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1430 unsignedp ? umul_widen_optab : smul_widen_optab,
1431 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1433 if (temp != 0)
1435 if (GET_MODE_CLASS (mode) == MODE_INT
1436 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1437 GET_MODE_BITSIZE (GET_MODE (temp))))
1438 return gen_lowpart (mode, temp);
1439 else
1440 return convert_to_mode (mode, temp, unsignedp);
1444 /* Look for a wider mode of the same class for which we think we
1445 can open-code the operation. Check for a widening multiply at the
1446 wider mode as well. */
1448 if (CLASS_HAS_WIDER_MODES_P (class)
1449 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1450 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1451 wider_mode != VOIDmode;
1452 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1454 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1455 || (binoptab == smul_optab
1456 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1457 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1458 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1459 != CODE_FOR_nothing)))
1461 rtx xop0 = op0, xop1 = op1;
1462 int no_extend = 0;
1464 /* For certain integer operations, we need not actually extend
1465 the narrow operands, as long as we will truncate
1466 the results to the same narrowness. */
1468 if ((binoptab == ior_optab || binoptab == and_optab
1469 || binoptab == xor_optab
1470 || binoptab == add_optab || binoptab == sub_optab
1471 || binoptab == smul_optab || binoptab == ashl_optab)
1472 && class == MODE_INT)
1473 no_extend = 1;
1475 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1477 /* The second operand of a shift must always be extended. */
1478 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1479 no_extend && binoptab != ashl_optab);
1481 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1482 unsignedp, OPTAB_DIRECT);
1483 if (temp)
1485 if (class != MODE_INT
1486 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1487 GET_MODE_BITSIZE (wider_mode)))
1489 if (target == 0)
1490 target = gen_reg_rtx (mode);
1491 convert_move (target, temp, 0);
1492 return target;
1494 else
1495 return gen_lowpart (mode, temp);
1497 else
1498 delete_insns_since (last);
1502 /* These can be done a word at a time. */
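/* Sketch of the word-at-a-time expansion, assuming 32-bit words and a
   DImode bitwise AND (the same shape applies to IOR and XOR):

       target_low  = op0_low  & op1_low;
       target_high = op0_high & op1_high;

   i.e. one independent word_mode operation per word, as the loop below
   emits.  */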
1503 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1504 && class == MODE_INT
1505 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1506 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1508 int i;
1509 rtx insns;
1510 rtx equiv_value;
1512 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1513 won't be accurate, so use a new target. */
1514 if (target == 0 || target == op0 || target == op1)
1515 target = gen_reg_rtx (mode);
1517 start_sequence ();
1519 /* Do the actual arithmetic. */
1520 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1522 rtx target_piece = operand_subword (target, i, 1, mode);
1523 rtx x = expand_binop (word_mode, binoptab,
1524 operand_subword_force (op0, i, mode),
1525 operand_subword_force (op1, i, mode),
1526 target_piece, unsignedp, next_methods);
1528 if (x == 0)
1529 break;
1531 if (target_piece != x)
1532 emit_move_insn (target_piece, x);
1535 insns = get_insns ();
1536 end_sequence ();
1538 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1540 if (binoptab->code != UNKNOWN)
1541 equiv_value
1542 = gen_rtx_fmt_ee (binoptab->code, mode,
1543 copy_rtx (op0), copy_rtx (op1));
1544 else
1545 equiv_value = 0;
1547 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1548 return target;
1552 /* Synthesize double word shifts from single word shifts. */
1553 if ((binoptab == lshr_optab || binoptab == ashl_optab
1554 || binoptab == ashr_optab)
1555 && class == MODE_INT
1556 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1557 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1558 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1559 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1560 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1562 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1563 enum machine_mode op1_mode;
1565 double_shift_mask = targetm.shift_truncation_mask (mode);
1566 shift_mask = targetm.shift_truncation_mask (word_mode);
1567 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1569 /* Apply the truncation to constant shifts. */
1570 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1571 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1573 if (op1 == CONST0_RTX (op1_mode))
1574 return op0;
1576 /* Make sure that this is a combination that expand_doubleword_shift
1577 can handle. See the comments there for details. */
1578 if (double_shift_mask == 0
1579 || (shift_mask == BITS_PER_WORD - 1
1580 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1582 rtx insns, equiv_value;
1583 rtx into_target, outof_target;
1584 rtx into_input, outof_input;
1585 int left_shift, outof_word;
1587 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1588 won't be accurate, so use a new target. */
1589 if (target == 0 || target == op0 || target == op1)
1590 target = gen_reg_rtx (mode);
1592 start_sequence ();
1594 /* OUTOF_* is the word we are shifting bits away from, and
1595 INTO_* is the word that we are shifting bits towards, thus
1596 they differ depending on the direction of the shift and
1597 WORDS_BIG_ENDIAN. */
1599 left_shift = binoptab == ashl_optab;
1600 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1602 outof_target = operand_subword (target, outof_word, 1, mode);
1603 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1605 outof_input = operand_subword_force (op0, outof_word, mode);
1606 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1608 if (expand_doubleword_shift (op1_mode, binoptab,
1609 outof_input, into_input, op1,
1610 outof_target, into_target,
1611 unsignedp, next_methods, shift_mask))
1613 insns = get_insns ();
1614 end_sequence ();
1616 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1617 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1618 return target;
1620 end_sequence ();
1624 /* Synthesize double word rotates from single word shifts. */
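/* Sketch of the synthesis, assuming 32-bit words and a constant left-rotate
   count 0 < k < 32:

       new_high = (high << k) | (low  >> (32 - k));
       new_low  = (low  << k) | (high >> (32 - k));

   A count of exactly 32 is just a word swap, and a count k > 32 is
   equivalent to a word swap followed by a rotate by k - 32, which the
   choice of shift counts and shift directions below reflects.  */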
1625 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1626 && class == MODE_INT
1627 && GET_CODE (op1) == CONST_INT
1628 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1629 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1630 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1632 rtx insns;
1633 rtx into_target, outof_target;
1634 rtx into_input, outof_input;
1635 rtx inter;
1636 int shift_count, left_shift, outof_word;
1638 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1639 won't be accurate, so use a new target. Do this also if target is not
1640 a REG, first because having a register instead may open optimization
1641 opportunities, and second because if target and op0 happen to be MEMs
1642 designating the same location, we would risk clobbering it too early
1643 in the code sequence we generate below. */
1644 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1645 target = gen_reg_rtx (mode);
1647 start_sequence ();
1649 shift_count = INTVAL (op1);
1651 /* OUTOF_* is the word we are shifting bits away from, and
1652 INTO_* is the word that we are shifting bits towards, thus
1653 they differ depending on the direction of the shift and
1654 WORDS_BIG_ENDIAN. */
1656 left_shift = (binoptab == rotl_optab);
1657 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1659 outof_target = operand_subword (target, outof_word, 1, mode);
1660 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1662 outof_input = operand_subword_force (op0, outof_word, mode);
1663 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1665 if (shift_count == BITS_PER_WORD)
1667 /* This is just a word swap. */
1668 emit_move_insn (outof_target, into_input);
1669 emit_move_insn (into_target, outof_input);
1670 inter = const0_rtx;
1672 else
1674 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1675 rtx first_shift_count, second_shift_count;
1676 optab reverse_unsigned_shift, unsigned_shift;
1678 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1679 ? lshr_optab : ashl_optab);
1681 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1682 ? ashl_optab : lshr_optab);
1684 if (shift_count > BITS_PER_WORD)
1686 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1687 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1689 else
1691 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1692 second_shift_count = GEN_INT (shift_count);
1695 into_temp1 = expand_binop (word_mode, unsigned_shift,
1696 outof_input, first_shift_count,
1697 NULL_RTX, unsignedp, next_methods);
1698 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1699 into_input, second_shift_count,
1700 NULL_RTX, unsignedp, next_methods);
1702 if (into_temp1 != 0 && into_temp2 != 0)
1703 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1704 into_target, unsignedp, next_methods);
1705 else
1706 inter = 0;
1708 if (inter != 0 && inter != into_target)
1709 emit_move_insn (into_target, inter);
1711 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1712 into_input, first_shift_count,
1713 NULL_RTX, unsignedp, next_methods);
1714 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1715 outof_input, second_shift_count,
1716 NULL_RTX, unsignedp, next_methods);
1718 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1719 inter = expand_binop (word_mode, ior_optab,
1720 outof_temp1, outof_temp2,
1721 outof_target, unsignedp, next_methods);
1723 if (inter != 0 && inter != outof_target)
1724 emit_move_insn (outof_target, inter);
1727 insns = get_insns ();
1728 end_sequence ();
1730 if (inter != 0)
1732 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1733 block to help the register allocator a bit. But a multi-word
1734 rotate will need all the input bits when setting the output
1735 bits, so there clearly is a conflict between the input and
1736 output registers. So we can't use a no-conflict block here. */
1737 emit_insn (insns);
1738 return target;
1742 /* These can be done a word at a time by propagating carries. */
1743 if ((binoptab == add_optab || binoptab == sub_optab)
1744 && class == MODE_INT
1745 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1746 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1748 unsigned int i;
1749 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1750 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1751 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1752 rtx xop0, xop1, xtarget;
1754 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1755 value is one of those, use it. Otherwise, use 1 since it is the
1756 one easiest to get. */
1757 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1758 int normalizep = STORE_FLAG_VALUE;
1759 #else
1760 int normalizep = 1;
1761 #endif
1763 /* Prepare the operands. */
1764 xop0 = force_reg (mode, op0);
1765 xop1 = force_reg (mode, op1);
1767 xtarget = gen_reg_rtx (mode);
1769 if (target == 0 || !REG_P (target))
1770 target = xtarget;
1772 /* Indicate for flow that the entire target reg is being set. */
1773 if (REG_P (target))
1774 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1776 /* Do the actual arithmetic. */
1777 for (i = 0; i < nwords; i++)
1779 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1780 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1781 rtx op0_piece = operand_subword_force (xop0, index, mode);
1782 rtx op1_piece = operand_subword_force (xop1, index, mode);
1783 rtx x;
1785 /* Main add/subtract of the input operands. */
1786 x = expand_binop (word_mode, binoptab,
1787 op0_piece, op1_piece,
1788 target_piece, unsignedp, next_methods);
1789 if (x == 0)
1790 break;
1792 if (i + 1 < nwords)
1794 /* Store carry from main add/subtract. */
1795 carry_out = gen_reg_rtx (word_mode);
1796 carry_out = emit_store_flag_force (carry_out,
1797 (binoptab == add_optab
1798 ? LT : GT),
1799 x, op0_piece,
1800 word_mode, 1, normalizep);
1803 if (i > 0)
1805 rtx newx;
1807 /* Add/subtract previous carry to main result. */
1808 newx = expand_binop (word_mode,
1809 normalizep == 1 ? binoptab : otheroptab,
1810 x, carry_in,
1811 NULL_RTX, 1, next_methods);
1813 if (i + 1 < nwords)
1815 /* Get out carry from adding/subtracting carry in. */
1816 rtx carry_tmp = gen_reg_rtx (word_mode);
1817 carry_tmp = emit_store_flag_force (carry_tmp,
1818 (binoptab == add_optab
1819 ? LT : GT),
1820 newx, x,
1821 word_mode, 1, normalizep);
1823 /* Logical-ior the two possible carries together. */
1824 carry_out = expand_binop (word_mode, ior_optab,
1825 carry_out, carry_tmp,
1826 carry_out, 0, next_methods);
1827 if (carry_out == 0)
1828 break;
1830 emit_move_insn (target_piece, newx);
1832 else
1834 if (x != target_piece)
1835 emit_move_insn (target_piece, x);
1838 carry_in = carry_out;
1841 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1843 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1844 || ! rtx_equal_p (target, xtarget))
1846 rtx temp = emit_move_insn (target, xtarget);
1848 set_unique_reg_note (temp,
1849 REG_EQUAL,
1850 gen_rtx_fmt_ee (binoptab->code, mode,
1851 copy_rtx (xop0),
1852 copy_rtx (xop1)));
1854 else
1855 target = xtarget;
1857 return target;
1860 else
1861 delete_insns_since (last);
1864 /* Attempt to synthesize double word multiplies using a sequence of word
1865 mode multiplications. We first attempt to generate a sequence using a
1866 more efficient unsigned widening multiply, and if that fails we then
1867 try using a signed widening multiply. */
1869 if (binoptab == smul_optab
1870 && class == MODE_INT
1871 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1872 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1873 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1875 rtx product = NULL_RTX;
1877 if (umul_widen_optab->handlers[(int) mode].insn_code
1878 != CODE_FOR_nothing)
1880 product = expand_doubleword_mult (mode, op0, op1, target,
1881 true, methods);
1882 if (!product)
1883 delete_insns_since (last);
1886 if (product == NULL_RTX
1887 && smul_widen_optab->handlers[(int) mode].insn_code
1888 != CODE_FOR_nothing)
1890 product = expand_doubleword_mult (mode, op0, op1, target,
1891 false, methods);
1892 if (!product)
1893 delete_insns_since (last);
1896 if (product != NULL_RTX)
1898 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1900 temp = emit_move_insn (target ? target : product, product);
1901 set_unique_reg_note (temp,
1902 REG_EQUAL,
1903 gen_rtx_fmt_ee (MULT, mode,
1904 copy_rtx (op0),
1905 copy_rtx (op1)));
1907 return product;
1911 /* It can't be open-coded in this mode.
1912 Use a library call if one is available and caller says that's ok. */
1914 if (binoptab->handlers[(int) mode].libfunc
1915 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1917 rtx insns;
1918 rtx op1x = op1;
1919 enum machine_mode op1_mode = mode;
1920 rtx value;
1922 start_sequence ();
1924 if (shift_op)
1926 op1_mode = word_mode;
1927 /* Specify unsigned here,
1928 since negative shift counts are meaningless. */
1929 op1x = convert_to_mode (word_mode, op1, 1);
1932 if (GET_MODE (op0) != VOIDmode
1933 && GET_MODE (op0) != mode)
1934 op0 = convert_to_mode (mode, op0, unsignedp);
1936 /* Pass 1 for NO_QUEUE so we don't lose any increments
1937 if the libcall is cse'd or moved. */
1938 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1939 NULL_RTX, LCT_CONST, mode, 2,
1940 op0, mode, op1x, op1_mode);
1942 insns = get_insns ();
1943 end_sequence ();
1945 target = gen_reg_rtx (mode);
1946 emit_libcall_block (insns, target, value,
1947 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1949 return target;
1952 delete_insns_since (last);
1954 /* It can't be done in this mode. Can we do it in a wider mode? */
1956 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1957 || methods == OPTAB_MUST_WIDEN))
1959 /* Caller says, don't even try. */
1960 delete_insns_since (entry_last);
1961 return 0;
1964 /* Compute the value of METHODS to pass to recursive calls.
1965 Don't allow widening to be tried recursively. */
1967 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1969 /* Look for a wider mode of the same class for which it appears we can do
1970 the operation. */
1972 if (CLASS_HAS_WIDER_MODES_P (class))
1974 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1975 wider_mode != VOIDmode;
1976 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1978 if ((binoptab->handlers[(int) wider_mode].insn_code
1979 != CODE_FOR_nothing)
1980 || (methods == OPTAB_LIB
1981 && binoptab->handlers[(int) wider_mode].libfunc))
1983 rtx xop0 = op0, xop1 = op1;
1984 int no_extend = 0;
1986 /* For certain integer operations, we need not actually extend
1987 the narrow operands, as long as we will truncate
1988 the results to the same narrowness. */
1990 if ((binoptab == ior_optab || binoptab == and_optab
1991 || binoptab == xor_optab
1992 || binoptab == add_optab || binoptab == sub_optab
1993 || binoptab == smul_optab || binoptab == ashl_optab)
1994 && class == MODE_INT)
1995 no_extend = 1;
1997 xop0 = widen_operand (xop0, wider_mode, mode,
1998 unsignedp, no_extend);
2000 /* The second operand of a shift must always be extended. */
2001 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2002 no_extend && binoptab != ashl_optab);
2004 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2005 unsignedp, methods);
2006 if (temp)
2008 if (class != MODE_INT
2009 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2010 GET_MODE_BITSIZE (wider_mode)))
2012 if (target == 0)
2013 target = gen_reg_rtx (mode);
2014 convert_move (target, temp, 0);
2015 return target;
2017 else
2018 return gen_lowpart (mode, temp);
2020 else
2021 delete_insns_since (last);
2026 delete_insns_since (entry_last);
2027 return 0;
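/* Illustrative sketches, not part of optabs.c: the two double-word
   expansions above, written as plain C for exposition.  A "double-word"
   value is modelled with ordinary integer types (assumed 32-bit unsigned
   int and 64-bit unsigned long long); all names here are hypothetical.  */

/* Word-at-a-time addition with carry propagation, mirroring the
   add_optab/sub_optab loop above: the carry out of each word is
   recovered with the same unsigned "sum < addend" comparison that
   emit_store_flag_force materializes in RTL.  */

struct dw_sketch { unsigned int lo, hi; };   /* two-word value, low word first */

static struct dw_sketch
dw_add_sketch (struct dw_sketch a, struct dw_sketch b)
{
  struct dw_sketch r;
  unsigned int carry;

  r.lo = a.lo + b.lo;
  carry = (r.lo < a.lo);            /* carry out of the low word */
  r.hi = a.hi + b.hi + carry;       /* propagate it into the high word */
  return r;
}

/* Double-word multiply synthesized from word-mode multiplies, mirroring
   what expand_doubleword_mult arranges: only the low double-word of the
   product is kept, so the high*high term can be dropped and the two
   cross terms only feed the upper word.  */

static unsigned long long
dw_mult_sketch (unsigned long long a, unsigned long long b)
{
  unsigned int a_lo = (unsigned int) a, a_hi = (unsigned int) (a >> 32);
  unsigned int b_lo = (unsigned int) b, b_hi = (unsigned int) (b >> 32);

  /* Widening multiply of the low words gives the full 64-bit base term.  */
  unsigned long long prod = (unsigned long long) a_lo * b_lo;

  /* The cross products only affect the upper 32 bits of the result.  */
  prod += (unsigned long long) (a_lo * b_hi + a_hi * b_lo) << 32;
  return prod;
}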
2030 /* Expand a binary operator which has both signed and unsigned forms.
2031 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2032 signed operations.
2034 If we widen unsigned operands, we may use a signed wider operation instead
2035 of an unsigned wider operation, since the result would be the same. */
2038 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2039 rtx op0, rtx op1, rtx target, int unsignedp,
2040 enum optab_methods methods)
2042 rtx temp;
2043 optab direct_optab = unsignedp ? uoptab : soptab;
2044 struct optab wide_soptab;
2046 /* Do it without widening, if possible. */
2047 temp = expand_binop (mode, direct_optab, op0, op1, target,
2048 unsignedp, OPTAB_DIRECT);
2049 if (temp || methods == OPTAB_DIRECT)
2050 return temp;
2052 /* Try widening to a signed int. Make a fake signed optab that
2053 hides any signed insn for direct use. */
2054 wide_soptab = *soptab;
2055 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2056 wide_soptab.handlers[(int) mode].libfunc = 0;
2058 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2059 unsignedp, OPTAB_WIDEN);
2061 /* For unsigned operands, try widening to an unsigned int. */
2062 if (temp == 0 && unsignedp)
2063 temp = expand_binop (mode, uoptab, op0, op1, target,
2064 unsignedp, OPTAB_WIDEN);
2065 if (temp || methods == OPTAB_WIDEN)
2066 return temp;
2068 /* Use the right width lib call if that exists. */
2069 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2070 if (temp || methods == OPTAB_LIB)
2071 return temp;
2073 /* Must widen and use a lib call, use either signed or unsigned. */
2074 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2075 unsignedp, methods);
2076 if (temp != 0)
2077 return temp;
2078 if (unsignedp)
2079 return expand_binop (mode, uoptab, op0, op1, target,
2080 unsignedp, methods);
2081 return 0;
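/* Illustrative sketch, not part of optabs.c: why sign_expand_binop may
   widen an unsigned narrow operation into a *signed* wider one.  For
   operations such as add, sub, mul and the logical ops, the low N bits
   of the result depend only on the low N bits of the operands, so after
   zero-extending into the wider mode it does not matter whether the wide
   operation is signed or unsigned once the result is truncated back.
   The 8-bit/int combination below is just an example.  */

static unsigned char
add_u8_via_signed_int_sketch (unsigned char a, unsigned char b)
{
  int wide = (int) a + (int) b;       /* signed arithmetic in the wider mode */
  return (unsigned char) wide;        /* truncating back gives the u8 sum */
}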
2084 /* Generate code to perform an operation specified by UNOPPTAB
2085 on operand OP0, with two results to TARG0 and TARG1.
2086 We assume that the order of the operands for the instruction
2087 is TARG0, TARG1, OP0.
2089 Either TARG0 or TARG1 may be zero, but what that means is that
2090 the result is not actually wanted. We will generate it into
2091 a dummy pseudo-reg and discard it. They may not both be zero.
2093 Returns 1 if this operation can be performed; 0 if not. */
2096 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2097 int unsignedp)
2099 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2100 enum mode_class class;
2101 enum machine_mode wider_mode;
2102 rtx entry_last = get_last_insn ();
2103 rtx last;
2105 class = GET_MODE_CLASS (mode);
2107 if (!targ0)
2108 targ0 = gen_reg_rtx (mode);
2109 if (!targ1)
2110 targ1 = gen_reg_rtx (mode);
2112 /* Record where to go back to if we fail. */
2113 last = get_last_insn ();
2115 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2117 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2118 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2119 rtx pat;
2120 rtx xop0 = op0;
2122 if (GET_MODE (xop0) != VOIDmode
2123 && GET_MODE (xop0) != mode0)
2124 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2126 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2127 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2128 xop0 = copy_to_mode_reg (mode0, xop0);
2130 /* We could handle this, but we should always be called with a pseudo
2131 for our targets and all insns should take them as outputs. */
2132 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2133 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2135 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2136 if (pat)
2138 emit_insn (pat);
2139 return 1;
2141 else
2142 delete_insns_since (last);
2145 /* It can't be done in this mode. Can we do it in a wider mode? */
2147 if (CLASS_HAS_WIDER_MODES_P (class))
2149 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2150 wider_mode != VOIDmode;
2151 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2153 if (unoptab->handlers[(int) wider_mode].insn_code
2154 != CODE_FOR_nothing)
2156 rtx t0 = gen_reg_rtx (wider_mode);
2157 rtx t1 = gen_reg_rtx (wider_mode);
2158 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2160 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2162 convert_move (targ0, t0, unsignedp);
2163 convert_move (targ1, t1, unsignedp);
2164 return 1;
2166 else
2167 delete_insns_since (last);
2172 delete_insns_since (entry_last);
2173 return 0;
2176 /* Generate code to perform an operation specified by BINOPTAB
2177 on operands OP0 and OP1, with two results to TARG0 and TARG1.
2178 We assume that the order of the operands for the instruction
2179 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2180 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2182 Either TARG0 or TARG1 may be zero, but what that means is that
2183 the result is not actually wanted. We will generate it into
2184 a dummy pseudo-reg and discard it. They may not both be zero.
2186 Returns 1 if this operation can be performed; 0 if not. */
2189 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2190 int unsignedp)
2192 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2193 enum mode_class class;
2194 enum machine_mode wider_mode;
2195 rtx entry_last = get_last_insn ();
2196 rtx last;
2198 class = GET_MODE_CLASS (mode);
2200 /* If we are inside an appropriately-short loop and we are optimizing,
2201 force expensive constants into a register. */
2202 if (CONSTANT_P (op0) && optimize
2203 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2204 op0 = force_reg (mode, op0);
2206 if (CONSTANT_P (op1) && optimize
2207 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2208 op1 = force_reg (mode, op1);
2210 if (!targ0)
2211 targ0 = gen_reg_rtx (mode);
2212 if (!targ1)
2213 targ1 = gen_reg_rtx (mode);
2215 /* Record where to go back to if we fail. */
2216 last = get_last_insn ();
2218 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2220 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2221 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2222 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2223 rtx pat;
2224 rtx xop0 = op0, xop1 = op1;
2226 /* In case the insn wants input operands in modes different from
2227 those of the actual operands, convert the operands. It would
2228 seem that we don't need to convert CONST_INTs, but we do, so
2229 that they're properly zero-extended, sign-extended or truncated
2230 for their mode. */
2232 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2233 xop0 = convert_modes (mode0,
2234 GET_MODE (op0) != VOIDmode
2235 ? GET_MODE (op0)
2236 : mode,
2237 xop0, unsignedp);
2239 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2240 xop1 = convert_modes (mode1,
2241 GET_MODE (op1) != VOIDmode
2242 ? GET_MODE (op1)
2243 : mode,
2244 xop1, unsignedp);
2246 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2247 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2248 xop0 = copy_to_mode_reg (mode0, xop0);
2250 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2251 xop1 = copy_to_mode_reg (mode1, xop1);
2253 /* We could handle this, but we should always be called with a pseudo
2254 for our targets and all insns should take them as outputs. */
2255 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2256 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2258 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2259 if (pat)
2261 emit_insn (pat);
2262 return 1;
2264 else
2265 delete_insns_since (last);
2268 /* It can't be done in this mode. Can we do it in a wider mode? */
2270 if (CLASS_HAS_WIDER_MODES_P (class))
2272 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2273 wider_mode != VOIDmode;
2274 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2276 if (binoptab->handlers[(int) wider_mode].insn_code
2277 != CODE_FOR_nothing)
2279 rtx t0 = gen_reg_rtx (wider_mode);
2280 rtx t1 = gen_reg_rtx (wider_mode);
2281 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2282 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2284 if (expand_twoval_binop (binoptab, cop0, cop1,
2285 t0, t1, unsignedp))
2287 convert_move (targ0, t0, unsignedp);
2288 convert_move (targ1, t1, unsignedp);
2289 return 1;
2291 else
2292 delete_insns_since (last);
2297 delete_insns_since (entry_last);
2298 return 0;
2301 /* Expand the two-valued library call indicated by BINOPTAB, but
2302 preserve only one of the values. If TARG0 is non-NULL, the first
2303 value is placed into TARG0; otherwise the second value is placed
2304 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2305 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2306 This routine assumes that the value returned by the library call is
2307 as if the return value was of an integral mode twice as wide as the
2308 mode of OP0. Returns 1 if the call was successful. */
2310 bool
2311 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2312 rtx targ0, rtx targ1, enum rtx_code code)
2314 enum machine_mode mode;
2315 enum machine_mode libval_mode;
2316 rtx libval;
2317 rtx insns;
2319 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2320 gcc_assert (!targ0 != !targ1);
2322 mode = GET_MODE (op0);
2323 if (!binoptab->handlers[(int) mode].libfunc)
2324 return false;
2326 /* The value returned by the library function will have twice as
2327 many bits as the nominal MODE. */
2328 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2329 MODE_INT);
2330 start_sequence ();
2331 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2332 NULL_RTX, LCT_CONST,
2333 libval_mode, 2,
2334 op0, mode,
2335 op1, mode);
2336 /* Get the part of VAL containing the value that we want. */
2337 libval = simplify_gen_subreg (mode, libval, libval_mode,
2338 targ0 ? 0 : GET_MODE_SIZE (mode));
2339 insns = get_insns ();
2340 end_sequence ();
2341 /* Move the result into the desired location. */
2342 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2343 gen_rtx_fmt_ee (code, mode, op0, op1));
2345 return true;
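/* Illustrative sketch, not part of optabs.c: the calling convention
   expand_twoval_binop_libfunc relies on, namely a library routine that
   returns both results packed into a single value of an integer mode
   twice as wide as the operands.  The packing below (first result in the
   low half, second in the high half) and all the names are hypothetical,
   chosen only for this example; the real code extracts the desired half
   with simplify_gen_subreg, so the actual layout follows the target
   rather than any fixed rule.  Assumes 32-bit unsigned int and 64-bit
   unsigned long long.  */

static unsigned long long
divmod_packed_sketch (unsigned int a, unsigned int b)
{
  unsigned long long quot = a / b;
  unsigned long long rem = a % b;
  return quot | (rem << 32);          /* both values in one double-wide word */
}

static unsigned int
packed_first_value_sketch (unsigned long long packed)
{
  return (unsigned int) packed;               /* low half */
}

static unsigned int
packed_second_value_sketch (unsigned long long packed)
{
  return (unsigned int) (packed >> 32);       /* high half */
}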
2349 /* Wrapper around expand_unop which takes an rtx code to specify
2350 the operation to perform, not an optab pointer. All other
2351 arguments are the same. */
2353 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2354 rtx target, int unsignedp)
2356 optab unop = code_to_optab[(int) code];
2357 gcc_assert (unop);
2359 return expand_unop (mode, unop, op0, target, unsignedp);
2362 /* Try calculating
2363 (clz:narrow x)
2365 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2366 static rtx
2367 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2369 enum mode_class class = GET_MODE_CLASS (mode);
2370 if (CLASS_HAS_WIDER_MODES_P (class))
2372 enum machine_mode wider_mode;
2373 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2374 wider_mode != VOIDmode;
2375 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2377 if (clz_optab->handlers[(int) wider_mode].insn_code
2378 != CODE_FOR_nothing)
2380 rtx xop0, temp, last;
2382 last = get_last_insn ();
2384 if (target == 0)
2385 target = gen_reg_rtx (mode);
2386 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2387 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2388 if (temp != 0)
2389 temp = expand_binop (wider_mode, sub_optab, temp,
2390 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2391 - GET_MODE_BITSIZE (mode)),
2392 target, true, OPTAB_DIRECT);
2393 if (temp == 0)
2394 delete_insns_since (last);
2396 return temp;
2400 return 0;
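/* Illustrative sketch, not part of optabs.c: the identity used by
   widen_clz above,
     clz_narrow (x) == clz_wide (zero_extend (x)) - (wide_width - narrow_width).
   Here the narrow mode is 8 bits and the wide clz is GCC's __builtin_clz
   on unsigned int; like that builtin, the sketch is only meaningful for a
   nonzero argument.  Assumes 8-bit bytes.  */

static int
clz8_sketch (unsigned char x)
{
  return __builtin_clz ((unsigned int) x)
         - ((int) (8 * sizeof (unsigned int)) - 8);
}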
2403 /* Try calculating (parity x) as (and (popcount x) 1), where
2404 popcount can also be done in a wider mode. */
2405 static rtx
2406 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2408 enum mode_class class = GET_MODE_CLASS (mode);
2409 if (CLASS_HAS_WIDER_MODES_P (class))
2411 enum machine_mode wider_mode;
2412 for (wider_mode = mode; wider_mode != VOIDmode;
2413 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2415 if (popcount_optab->handlers[(int) wider_mode].insn_code
2416 != CODE_FOR_nothing)
2418 rtx xop0, temp, last;
2420 last = get_last_insn ();
2422 if (target == 0)
2423 target = gen_reg_rtx (mode);
2424 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2425 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2426 true);
2427 if (temp != 0)
2428 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2429 target, true, OPTAB_DIRECT);
2430 if (temp == 0)
2431 delete_insns_since (last);
2433 return temp;
2437 return 0;
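/* Illustrative sketch, not part of optabs.c: the rewrite performed by
   expand_parity, (parity x) == (and (popcount x) 1), with GCC's
   __builtin_popcount standing in for the (possibly wider-mode)
   popcount instruction.  */

static int
parity_sketch (unsigned int x)
{
  return __builtin_popcount (x) & 1;   /* 1 iff an odd number of bits are set */
}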
2440 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2441 conditions, VAL may already be a SUBREG against which we cannot generate
2442 a further SUBREG. In this case, we expect forcing the value into a
2443 register will work around the situation. */
2445 static rtx
2446 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2447 enum machine_mode imode)
2449 rtx ret;
2450 ret = lowpart_subreg (omode, val, imode);
2451 if (ret == NULL)
2453 val = force_reg (imode, val);
2454 ret = lowpart_subreg (omode, val, imode);
2455 gcc_assert (ret != NULL);
2457 return ret;
2460 /* Expand a floating point absolute value or negation operation via a
2461 logical operation on the sign bit. */
2463 static rtx
2464 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2465 rtx op0, rtx target)
2467 const struct real_format *fmt;
2468 int bitpos, word, nwords, i;
2469 enum machine_mode imode;
2470 HOST_WIDE_INT hi, lo;
2471 rtx temp, insns;
2473 /* The format has to have a simple sign bit. */
2474 fmt = REAL_MODE_FORMAT (mode);
2475 if (fmt == NULL)
2476 return NULL_RTX;
2478 bitpos = fmt->signbit_rw;
2479 if (bitpos < 0)
2480 return NULL_RTX;
2482 /* Don't create negative zeros if the format doesn't support them. */
2483 if (code == NEG && !fmt->has_signed_zero)
2484 return NULL_RTX;
2486 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2488 imode = int_mode_for_mode (mode);
2489 if (imode == BLKmode)
2490 return NULL_RTX;
2491 word = 0;
2492 nwords = 1;
2494 else
2496 imode = word_mode;
2498 if (FLOAT_WORDS_BIG_ENDIAN)
2499 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2500 else
2501 word = bitpos / BITS_PER_WORD;
2502 bitpos = bitpos % BITS_PER_WORD;
2503 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2506 if (bitpos < HOST_BITS_PER_WIDE_INT)
2508 hi = 0;
2509 lo = (HOST_WIDE_INT) 1 << bitpos;
2511 else
2513 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2514 lo = 0;
2516 if (code == ABS)
2517 lo = ~lo, hi = ~hi;
2519 if (target == 0 || target == op0)
2520 target = gen_reg_rtx (mode);
2522 if (nwords > 1)
2524 start_sequence ();
2526 for (i = 0; i < nwords; ++i)
2528 rtx targ_piece = operand_subword (target, i, 1, mode);
2529 rtx op0_piece = operand_subword_force (op0, i, mode);
2531 if (i == word)
2533 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2534 op0_piece,
2535 immed_double_const (lo, hi, imode),
2536 targ_piece, 1, OPTAB_LIB_WIDEN);
2537 if (temp != targ_piece)
2538 emit_move_insn (targ_piece, temp);
2540 else
2541 emit_move_insn (targ_piece, op0_piece);
2544 insns = get_insns ();
2545 end_sequence ();
2547 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2548 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2550 else
2552 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2553 gen_lowpart (imode, op0),
2554 immed_double_const (lo, hi, imode),
2555 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2556 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2558 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2559 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2562 return target;
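/* Illustrative sketch, not part of optabs.c: the sign-bit manipulation
   that expand_absneg_bit emits, written for IEEE single precision.
   Negation XORs the sign bit and absolute value ANDs it away, after
   moving the value into an integer of the same width - the counterpart
   of the imode/lowpart handling above.  Assumes a 32-bit float and
   unsigned int; the bit copy goes through a union, a form of type
   punning that GCC supports.  */

static float
fneg_by_signbit_sketch (float x)
{
  union { float f; unsigned int bits; } u;
  u.f = x;
  u.bits ^= 0x80000000u;        /* flip the sign bit: negation */
  return u.f;
}

static float
fabs_by_signbit_sketch (float x)
{
  union { float f; unsigned int bits; } u;
  u.f = x;
  u.bits &= 0x7fffffffu;        /* clear the sign bit: absolute value */
  return u.f;
}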
2565 /* Generate code to perform an operation specified by UNOPTAB
2566 on operand OP0, with result having machine-mode MODE.
2568 UNSIGNEDP is for the case where we have to widen the operands
2569 to perform the operation. It says to use zero-extension.
2571 If TARGET is nonzero, the value
2572 is generated there, if it is convenient to do so.
2573 In all cases an rtx is returned for the locus of the value;
2574 this may or may not be TARGET. */
2577 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2578 int unsignedp)
2580 enum mode_class class;
2581 enum machine_mode wider_mode;
2582 rtx temp;
2583 rtx last = get_last_insn ();
2584 rtx pat;
2586 class = GET_MODE_CLASS (mode);
2588 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2590 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2591 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2592 rtx xop0 = op0;
2594 if (target)
2595 temp = target;
2596 else
2597 temp = gen_reg_rtx (mode);
2599 if (GET_MODE (xop0) != VOIDmode
2600 && GET_MODE (xop0) != mode0)
2601 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2603 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2605 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2606 xop0 = copy_to_mode_reg (mode0, xop0);
2608 if (!insn_data[icode].operand[0].predicate (temp, mode))
2609 temp = gen_reg_rtx (mode);
2611 pat = GEN_FCN (icode) (temp, xop0);
2612 if (pat)
2614 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2615 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2617 delete_insns_since (last);
2618 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2621 emit_insn (pat);
2623 return temp;
2625 else
2626 delete_insns_since (last);
2629 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2631 /* Widening clz needs special treatment. */
2632 if (unoptab == clz_optab)
2634 temp = widen_clz (mode, op0, target);
2635 if (temp)
2636 return temp;
2637 else
2638 goto try_libcall;
2641 /* We can't widen a bswap. */
2642 if (unoptab == bswap_optab)
2643 goto try_libcall;
2645 if (CLASS_HAS_WIDER_MODES_P (class))
2646 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2647 wider_mode != VOIDmode;
2648 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2650 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2652 rtx xop0 = op0;
2654 /* For certain operations, we need not actually extend
2655 the narrow operand, as long as we will truncate the
2656 results to the same narrowness. */
2658 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2659 (unoptab == neg_optab
2660 || unoptab == one_cmpl_optab)
2661 && class == MODE_INT);
2663 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2664 unsignedp);
2666 if (temp)
2668 if (class != MODE_INT
2669 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2670 GET_MODE_BITSIZE (wider_mode)))
2672 if (target == 0)
2673 target = gen_reg_rtx (mode);
2674 convert_move (target, temp, 0);
2675 return target;
2677 else
2678 return gen_lowpart (mode, temp);
2680 else
2681 delete_insns_since (last);
2685 /* These can be done a word at a time. */
2686 if (unoptab == one_cmpl_optab
2687 && class == MODE_INT
2688 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2689 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2691 int i;
2692 rtx insns;
2694 if (target == 0 || target == op0)
2695 target = gen_reg_rtx (mode);
2697 start_sequence ();
2699 /* Do the actual arithmetic. */
2700 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2702 rtx target_piece = operand_subword (target, i, 1, mode);
2703 rtx x = expand_unop (word_mode, unoptab,
2704 operand_subword_force (op0, i, mode),
2705 target_piece, unsignedp);
2707 if (target_piece != x)
2708 emit_move_insn (target_piece, x);
2711 insns = get_insns ();
2712 end_sequence ();
2714 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2715 gen_rtx_fmt_e (unoptab->code, mode,
2716 copy_rtx (op0)));
2717 return target;
2720 if (unoptab->code == NEG)
2722 /* Try negating floating point values by flipping the sign bit. */
2723 if (SCALAR_FLOAT_MODE_P (mode))
2725 temp = expand_absneg_bit (NEG, mode, op0, target);
2726 if (temp)
2727 return temp;
2730 /* If there is no negation pattern, and we have no negative zero,
2731 try subtracting from zero. */
2732 if (!HONOR_SIGNED_ZEROS (mode))
2734 temp = expand_binop (mode, (unoptab == negv_optab
2735 ? subv_optab : sub_optab),
2736 CONST0_RTX (mode), op0, target,
2737 unsignedp, OPTAB_DIRECT);
2738 if (temp)
2739 return temp;
2743 /* Try calculating parity (x) as popcount (x) % 2. */
2744 if (unoptab == parity_optab)
2746 temp = expand_parity (mode, op0, target);
2747 if (temp)
2748 return temp;
2751 try_libcall:
2752 /* Now try a library call in this mode. */
2753 if (unoptab->handlers[(int) mode].libfunc)
2755 rtx insns;
2756 rtx value;
2757 enum machine_mode outmode = mode;
2759 /* All of these functions return small values. Thus we choose to
2760 have them return something that isn't a double-word. */
2761 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2762 || unoptab == popcount_optab || unoptab == parity_optab)
2763 outmode
2764 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2766 start_sequence ();
2768 /* Pass 1 for NO_QUEUE so we don't lose any increments
2769 if the libcall is cse'd or moved. */
2770 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2771 NULL_RTX, LCT_CONST, outmode,
2772 1, op0, mode);
2773 insns = get_insns ();
2774 end_sequence ();
2776 target = gen_reg_rtx (outmode);
2777 emit_libcall_block (insns, target, value,
2778 gen_rtx_fmt_e (unoptab->code, outmode, op0));
2780 return target;
2783 /* It can't be done in this mode. Can we do it in a wider mode? */
2785 if (CLASS_HAS_WIDER_MODES_P (class))
2787 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2788 wider_mode != VOIDmode;
2789 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2791 if ((unoptab->handlers[(int) wider_mode].insn_code
2792 != CODE_FOR_nothing)
2793 || unoptab->handlers[(int) wider_mode].libfunc)
2795 rtx xop0 = op0;
2797 /* For certain operations, we need not actually extend
2798 the narrow operand, as long as we will truncate the
2799 results to the same narrowness. */
2801 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2802 (unoptab == neg_optab
2803 || unoptab == one_cmpl_optab)
2804 && class == MODE_INT);
2806 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2807 unsignedp);
2809 /* If we are generating clz using wider mode, adjust the
2810 result. */
2811 if (unoptab == clz_optab && temp != 0)
2812 temp = expand_binop (wider_mode, sub_optab, temp,
2813 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2814 - GET_MODE_BITSIZE (mode)),
2815 target, true, OPTAB_DIRECT);
2817 if (temp)
2819 if (class != MODE_INT)
2821 if (target == 0)
2822 target = gen_reg_rtx (mode);
2823 convert_move (target, temp, 0);
2824 return target;
2826 else
2827 return gen_lowpart (mode, temp);
2829 else
2830 delete_insns_since (last);
2835 /* One final attempt at implementing negation via subtraction,
2836 this time allowing widening of the operand. */
2837 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2839 rtx temp;
2840 temp = expand_binop (mode,
2841 unoptab == negv_optab ? subv_optab : sub_optab,
2842 CONST0_RTX (mode), op0,
2843 target, unsignedp, OPTAB_LIB_WIDEN);
2844 if (temp)
2845 return temp;
2848 return 0;
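/* Illustrative sketch, not part of optabs.c: why the 0 - X fallback for
   negation above is guarded by !HONOR_SIGNED_ZEROS.  With IEEE signed
   zeros the two forms disagree on a zero operand: under the default
   rounding mode 0.0 - (+0.0) yields +0.0, while -(+0.0) is -0.0.  For
   integer modes, or when signed zeros need not be honored, the identity
   -x == 0 - x holds and the subtraction is a valid substitute.  */

static double
neg_via_sub_sketch (double x)
{
  return 0.0 - x;       /* equals -x except when x is a zero */
}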
2851 /* Emit code to compute the absolute value of OP0, with result to
2852 TARGET if convenient. (TARGET may be 0.) The return value says
2853 where the result actually is to be found.
2855 MODE is the mode of the operand; the mode of the result is
2856 different but can be deduced from MODE. */
2861 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2862 int result_unsignedp)
2864 rtx temp;
2866 if (! flag_trapv)
2867 result_unsignedp = 1;
2869 /* First try to do it with a special abs instruction. */
2870 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2871 op0, target, 0);
2872 if (temp != 0)
2873 return temp;
2875 /* For floating point modes, try clearing the sign bit. */
2876 if (SCALAR_FLOAT_MODE_P (mode))
2878 temp = expand_absneg_bit (ABS, mode, op0, target);
2879 if (temp)
2880 return temp;
2883 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2884 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2885 && !HONOR_SIGNED_ZEROS (mode))
2887 rtx last = get_last_insn ();
2889 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2890 if (temp != 0)
2891 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2892 OPTAB_WIDEN);
2894 if (temp != 0)
2895 return temp;
2897 delete_insns_since (last);
2900 /* If this machine has expensive jumps, we can do integer absolute
2901 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2902 where W is the width of MODE. */
2904 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2906 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2907 size_int (GET_MODE_BITSIZE (mode) - 1),
2908 NULL_RTX, 0);
2910 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2911 OPTAB_LIB_WIDEN);
2912 if (temp != 0)
2913 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2914 temp, extended, target, 0, OPTAB_LIB_WIDEN);
2916 if (temp != 0)
2917 return temp;
2920 return NULL_RTX;
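/* Illustrative sketch, not part of optabs.c: the branch-free absolute
   value cited in the comment above,
     abs (x) == ((x >> (W-1)) ^ x) - (x >> (W-1)),
   where the shift is an arithmetic shift by the width minus one, giving
   0 for non-negative x and all-ones for negative x.  Right-shifting a
   negative signed value is implementation-defined in ISO C but is an
   arithmetic shift in GCC; like any negation, the result overflows for
   the most negative value.  Assumes 8-bit bytes.  */

static int
abs_nobranch_sketch (int x)
{
  int mask = x >> ((int) (8 * sizeof (int)) - 1);   /* 0 or -1 */
  return (x ^ mask) - mask;
}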
2924 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2925 int result_unsignedp, int safe)
2927 rtx temp, op1;
2929 if (! flag_trapv)
2930 result_unsignedp = 1;
2932 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2933 if (temp != 0)
2934 return temp;
2936 /* If that does not win, use conditional jump and negate. */
2938 /* It is safe to use the target if it is the same
2939 as the source, provided it is also a pseudo register. */
2940 if (op0 == target && REG_P (op0)
2941 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
2942 safe = 1;
2944 op1 = gen_label_rtx ();
2945 if (target == 0 || ! safe
2946 || GET_MODE (target) != mode
2947 || (MEM_P (target) && MEM_VOLATILE_P (target))
2948 || (REG_P (target)
2949 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2950 target = gen_reg_rtx (mode);
2952 emit_move_insn (target, op0);
2953 NO_DEFER_POP;
2955 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2956 NULL_RTX, NULL_RTX, op1);
2958 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2959 target, target, 0);
2960 if (op0 != target)
2961 emit_move_insn (target, op0);
2962 emit_label (op1);
2963 OK_DEFER_POP;
2964 return target;
2967 /* A subroutine of expand_copysign, perform the copysign operation using the
2968 abs and neg primitives advertised to exist on the target. The assumption
2969 is that we have a split register file, and that leaving op0 in fp registers
2970 and not playing with subregs too much will help the register allocator. */
2972 static rtx
2973 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2974 int bitpos, bool op0_is_abs)
2976 enum machine_mode imode;
2977 HOST_WIDE_INT hi, lo;
2978 int word;
2979 rtx label;
2981 if (target == op1)
2982 target = NULL_RTX;
2984 if (!op0_is_abs)
2986 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2987 if (op0 == NULL)
2988 return NULL_RTX;
2989 target = op0;
2991 else
2993 if (target == NULL_RTX)
2994 target = copy_to_reg (op0);
2995 else
2996 emit_move_insn (target, op0);
2999 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3001 imode = int_mode_for_mode (mode);
3002 if (imode == BLKmode)
3003 return NULL_RTX;
3004 op1 = gen_lowpart (imode, op1);
3006 else
3008 imode = word_mode;
3009 if (FLOAT_WORDS_BIG_ENDIAN)
3010 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3011 else
3012 word = bitpos / BITS_PER_WORD;
3013 bitpos = bitpos % BITS_PER_WORD;
3014 op1 = operand_subword_force (op1, word, mode);
3017 if (bitpos < HOST_BITS_PER_WIDE_INT)
3019 hi = 0;
3020 lo = (HOST_WIDE_INT) 1 << bitpos;
3022 else
3024 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3025 lo = 0;
3028 op1 = expand_binop (imode, and_optab, op1,
3029 immed_double_const (lo, hi, imode),
3030 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3032 label = gen_label_rtx ();
3033 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3035 if (GET_CODE (op0) == CONST_DOUBLE)
3036 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3037 else
3038 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3039 if (op0 != target)
3040 emit_move_insn (target, op0);
3042 emit_label (label);
3044 return target;
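/* Illustrative sketch, not part of optabs.c: the expand_copysign_absneg
   strategy in plain C - take the absolute value of the magnitude
   operand, then negate it only when the sign bit of the sign operand is
   set, which is what the and/compare/jump sequence built above does.
   Assumes IEEE single precision with a 32-bit unsigned int view.  */

static float
copysign_absneg_sketch (float x, float y)
{
  union { float f; unsigned int bits; } ux, uy;

  ux.f = x;
  ux.bits &= 0x7fffffffu;            /* |x|: the abs primitive */
  uy.f = y;
  if (uy.bits & 0x80000000u)         /* sign bit of Y set?  */
    ux.bits ^= 0x80000000u;          /* then negate the magnitude */
  return ux.f;
}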
3048 /* A subroutine of expand_copysign, perform the entire copysign operation
3049 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3050 is true if op0 is known to have its sign bit clear. */
3052 static rtx
3053 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3054 int bitpos, bool op0_is_abs)
3056 enum machine_mode imode;
3057 HOST_WIDE_INT hi, lo;
3058 int word, nwords, i;
3059 rtx temp, insns;
3061 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3063 imode = int_mode_for_mode (mode);
3064 if (imode == BLKmode)
3065 return NULL_RTX;
3066 word = 0;
3067 nwords = 1;
3069 else
3071 imode = word_mode;
3073 if (FLOAT_WORDS_BIG_ENDIAN)
3074 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3075 else
3076 word = bitpos / BITS_PER_WORD;
3077 bitpos = bitpos % BITS_PER_WORD;
3078 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3081 if (bitpos < HOST_BITS_PER_WIDE_INT)
3083 hi = 0;
3084 lo = (HOST_WIDE_INT) 1 << bitpos;
3086 else
3088 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3089 lo = 0;
3092 if (target == 0 || target == op0 || target == op1)
3093 target = gen_reg_rtx (mode);
3095 if (nwords > 1)
3097 start_sequence ();
3099 for (i = 0; i < nwords; ++i)
3101 rtx targ_piece = operand_subword (target, i, 1, mode);
3102 rtx op0_piece = operand_subword_force (op0, i, mode);
3104 if (i == word)
3106 if (!op0_is_abs)
3107 op0_piece = expand_binop (imode, and_optab, op0_piece,
3108 immed_double_const (~lo, ~hi, imode),
3109 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3111 op1 = expand_binop (imode, and_optab,
3112 operand_subword_force (op1, i, mode),
3113 immed_double_const (lo, hi, imode),
3114 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3116 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3117 targ_piece, 1, OPTAB_LIB_WIDEN);
3118 if (temp != targ_piece)
3119 emit_move_insn (targ_piece, temp);
3121 else
3122 emit_move_insn (targ_piece, op0_piece);
3125 insns = get_insns ();
3126 end_sequence ();
3128 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
3130 else
3132 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3133 immed_double_const (lo, hi, imode),
3134 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3136 op0 = gen_lowpart (imode, op0);
3137 if (!op0_is_abs)
3138 op0 = expand_binop (imode, and_optab, op0,
3139 immed_double_const (~lo, ~hi, imode),
3140 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3142 temp = expand_binop (imode, ior_optab, op0, op1,
3143 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3144 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3147 return target;
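/* Illustrative sketch, not part of optabs.c: the pure-bitmask copysign
   built by expand_copysign_bit - clear the sign bit of X, keep only the
   sign bit of Y, and IOR the two together, entirely in an integer view
   of the operands and with no branch, unlike the abs/neg variant above.
   Assumes IEEE single precision with a 32-bit unsigned int view.  */

static float
copysign_bit_sketch (float x, float y)
{
  union { float f; unsigned int bits; } ux, uy;

  ux.f = x;
  uy.f = y;
  ux.bits = (ux.bits & 0x7fffffffu)     /* magnitude of X */
            | (uy.bits & 0x80000000u);  /* sign of Y */
  return ux.f;
}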
3150 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3151 scalar floating point mode. Return NULL if we do not know how to
3152 expand the operation inline. */
3155 expand_copysign (rtx op0, rtx op1, rtx target)
3157 enum machine_mode mode = GET_MODE (op0);
3158 const struct real_format *fmt;
3159 bool op0_is_abs;
3160 rtx temp;
3162 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3163 gcc_assert (GET_MODE (op1) == mode);
3165 /* First try to do it with a special instruction. */
3166 temp = expand_binop (mode, copysign_optab, op0, op1,
3167 target, 0, OPTAB_DIRECT);
3168 if (temp)
3169 return temp;
3171 fmt = REAL_MODE_FORMAT (mode);
3172 if (fmt == NULL || !fmt->has_signed_zero)
3173 return NULL_RTX;
3175 op0_is_abs = false;
3176 if (GET_CODE (op0) == CONST_DOUBLE)
3178 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3179 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3180 op0_is_abs = true;
3183 if (fmt->signbit_ro >= 0
3184 && (GET_CODE (op0) == CONST_DOUBLE
3185 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
3186 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
3188 temp = expand_copysign_absneg (mode, op0, op1, target,
3189 fmt->signbit_ro, op0_is_abs);
3190 if (temp)
3191 return temp;
3194 if (fmt->signbit_rw < 0)
3195 return NULL_RTX;
3196 return expand_copysign_bit (mode, op0, op1, target,
3197 fmt->signbit_rw, op0_is_abs);
3200 /* Generate an instruction whose insn-code is INSN_CODE,
3201 with two operands: an output TARGET and an input OP0.
3202 TARGET *must* be nonzero, and the output is always stored there.
3203 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3204 the value that is stored into TARGET. */
3206 void
3207 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3209 rtx temp;
3210 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3211 rtx pat;
3213 temp = target;
3215 /* Now, if insn does not accept our operands, put them into pseudos. */
3217 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3218 op0 = copy_to_mode_reg (mode0, op0);
3220 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3221 temp = gen_reg_rtx (GET_MODE (temp));
3223 pat = GEN_FCN (icode) (temp, op0);
3225 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3226 add_equal_note (pat, temp, code, op0, NULL_RTX);
3228 emit_insn (pat);
3230 if (temp != target)
3231 emit_move_insn (target, temp);
3234 struct no_conflict_data
3236 rtx target, first, insn;
3237 bool must_stay;
3240 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3241 Set P->must_stay if the currently examined clobber / store has to stay
3242 in the list of insns that constitute the actual no_conflict block /
3243 libcall block. */
3244 static void
3245 no_conflict_move_test (rtx dest, rtx set, void *p0)
3247 struct no_conflict_data *p = p0;
3249 /* If this insn directly contributes to setting the target, it must stay. */
3250 if (reg_overlap_mentioned_p (p->target, dest))
3251 p->must_stay = true;
3252 /* If we haven't committed to keeping any other insns in the list yet,
3253 there is nothing more to check. */
3254 else if (p->insn == p->first)
3255 return;
3256 /* If this insn sets / clobbers a register that feeds one of the insns
3257 already in the list, this insn has to stay too. */
3258 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3259 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3260 || reg_used_between_p (dest, p->first, p->insn)
3261 /* Likewise if this insn depends on a register set by a previous
3262 insn in the list, or if it sets a result (presumably a hard
3263 register) that is set or clobbered by a previous insn.
3264 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3265 SET_DEST perform the former check on the address, and the latter
3266 check on the MEM. */
3267 || (GET_CODE (set) == SET
3268 && (modified_in_p (SET_SRC (set), p->first)
3269 || modified_in_p (SET_DEST (set), p->first)
3270 || modified_between_p (SET_SRC (set), p->first, p->insn)
3271 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3272 p->must_stay = true;
3275 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3276 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3277 is possible to do so. */
3279 static void
3280 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
3282 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3284 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3285 encapsulated region would not be in one basic block, i.e. when
3286 there is a control_flow_insn_p insn between FIRST and LAST. */
3287 bool attach_libcall_retval_notes = true;
3288 rtx insn, next = NEXT_INSN (last);
3290 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3291 if (control_flow_insn_p (insn))
3293 attach_libcall_retval_notes = false;
3294 break;
3297 if (attach_libcall_retval_notes)
3299 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3300 REG_NOTES (first));
3301 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3302 REG_NOTES (last));
3307 /* Emit code to perform a series of operations on a multi-word quantity, one
3308 word at a time.
3310 Such a block is preceded by a CLOBBER of the output, consists of multiple
3311 insns, each setting one word of the output, and followed by a SET copying
3312 the output to itself.
3314 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3315 note indicating that it doesn't conflict with the (also multi-word)
3316 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3317 notes.
3319 INSNS is a block of code generated to perform the operation, not including
3320 the CLOBBER and final copy. All insns that compute intermediate values
3321 are first emitted, followed by the block as described above.
3323 TARGET, OP0, and OP1 are the output and inputs of the operations,
3324 respectively. OP1 may be zero for a unary operation.
3326 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3327 on the last insn.
3329 If TARGET is not a register, INSNS is simply emitted with no special
3330 processing. Likewise if anything in INSNS is not an INSN or if
3331 there is a libcall block inside INSNS.
3333 The final insn emitted is returned. */
3336 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3338 rtx prev, next, first, last, insn;
3340 if (!REG_P (target) || reload_in_progress)
3341 return emit_insn (insns);
3342 else
3343 for (insn = insns; insn; insn = NEXT_INSN (insn))
3344 if (!NONJUMP_INSN_P (insn)
3345 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3346 return emit_insn (insns);
3348 /* First emit all insns that do not store into words of the output and remove
3349 these from the list. */
3350 for (insn = insns; insn; insn = next)
3352 rtx note;
3353 struct no_conflict_data data;
3355 next = NEXT_INSN (insn);
3357 /* Some ports (cris) create libcall regions on their own. We must
3358 avoid any potential nesting of LIBCALLs. */
3359 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3360 remove_note (insn, note);
3361 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3362 remove_note (insn, note);
3364 data.target = target;
3365 data.first = insns;
3366 data.insn = insn;
3367 data.must_stay = 0;
3368 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3369 if (! data.must_stay)
3371 if (PREV_INSN (insn))
3372 NEXT_INSN (PREV_INSN (insn)) = next;
3373 else
3374 insns = next;
3376 if (next)
3377 PREV_INSN (next) = PREV_INSN (insn);
3379 add_insn (insn);
3383 prev = get_last_insn ();
3385 /* Now write the CLOBBER of the output, followed by the setting of each
3386 of the words, followed by the final copy. */
3387 if (target != op0 && target != op1)
3388 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3390 for (insn = insns; insn; insn = next)
3392 next = NEXT_INSN (insn);
3393 add_insn (insn);
3395 if (op1 && REG_P (op1))
3396 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3397 REG_NOTES (insn));
3399 if (op0 && REG_P (op0))
3400 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3401 REG_NOTES (insn));
3404 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3405 != CODE_FOR_nothing)
3407 last = emit_move_insn (target, target);
3408 if (equiv)
3409 set_unique_reg_note (last, REG_EQUAL, equiv);
3411 else
3413 last = get_last_insn ();
3415 /* Remove any existing REG_EQUAL note from "last", or else it will
3416 be mistaken for a note referring to the full contents of the
3417 alleged libcall value when found together with the REG_RETVAL
3418 note added below. An existing note can come from an insn
3419 expansion at "last". */
3420 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3423 if (prev == 0)
3424 first = get_insns ();
3425 else
3426 first = NEXT_INSN (prev);
3428 maybe_encapsulate_block (first, last, equiv);
3430 return last;
3433 /* Emit code to make a call to a constant function or a library call.
3435 INSNS is a list containing all insns emitted in the call.
3436 These insns leave the result in RESULT. Our job is to copy RESULT
3437 to TARGET, which is logically equivalent to EQUIV.
3439 We first emit any insns that set a pseudo on the assumption that these are
3440 loading constants into registers; doing so allows them to be safely cse'ed
3441 between blocks. Then we emit all the other insns in the block, followed by
3442 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3443 note with an operand of EQUIV.
3445 Moving assignments to pseudos outside of the block is done to improve
3446 the generated code, but is not required to generate correct code,
3447 hence being unable to move an assignment is not grounds for not making
3448 a libcall block. There are two reasons why it is safe to leave these
3449 insns inside the block: First, we know that these pseudos cannot be
3450 used in generated RTL outside the block since they are created for
3451 temporary purposes within the block. Second, CSE will not record the
3452 values of anything set inside a libcall block, so we know they must
3453 be dead at the end of the block.
3455 Except for the first group of insns (the ones setting pseudos), the
3456 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3458 void
3459 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3461 rtx final_dest = target;
3462 rtx prev, next, first, last, insn;
3464 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3465 into a MEM later. Protect the libcall block from this change. */
3466 if (! REG_P (target) || REG_USERVAR_P (target))
3467 target = gen_reg_rtx (GET_MODE (target));
3469 /* If we're using non-call exceptions, a libcall corresponding to an
3470 operation that may trap may also trap. */
3471 if (flag_non_call_exceptions && may_trap_p (equiv))
3473 for (insn = insns; insn; insn = NEXT_INSN (insn))
3474 if (CALL_P (insn))
3476 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3478 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3479 remove_note (insn, note);
3482 else
3483 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3484 reg note to indicate that this call cannot throw or execute a nonlocal
3485 goto (unless there is already a REG_EH_REGION note, in which case
3486 we update it). */
3487 for (insn = insns; insn; insn = NEXT_INSN (insn))
3488 if (CALL_P (insn))
3490 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3492 if (note != 0)
3493 XEXP (note, 0) = constm1_rtx;
3494 else
3495 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3496 REG_NOTES (insn));
3499 /* First emit all insns that set pseudos. Remove them from the list as
3500 we go. Avoid insns that set pseudos which were referenced in previous
3501 insns. These can be generated by move_by_pieces, for example,
3502 to update an address. Similarly, avoid insns that reference things
3503 set in previous insns. */
3505 for (insn = insns; insn; insn = next)
3507 rtx set = single_set (insn);
3508 rtx note;
3510 /* Some ports (cris) create libcall regions on their own. We must
3511 avoid any potential nesting of LIBCALLs. */
3512 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3513 remove_note (insn, note);
3514 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3515 remove_note (insn, note);
3517 next = NEXT_INSN (insn);
3519 if (set != 0 && REG_P (SET_DEST (set))
3520 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3522 struct no_conflict_data data;
3524 data.target = const0_rtx;
3525 data.first = insns;
3526 data.insn = insn;
3527 data.must_stay = 0;
3528 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3529 if (! data.must_stay)
3531 if (PREV_INSN (insn))
3532 NEXT_INSN (PREV_INSN (insn)) = next;
3533 else
3534 insns = next;
3536 if (next)
3537 PREV_INSN (next) = PREV_INSN (insn);
3539 add_insn (insn);
3543 /* Some ports use a loop to copy large arguments onto the stack.
3544 Don't move anything outside such a loop. */
3545 if (LABEL_P (insn))
3546 break;
3549 prev = get_last_insn ();
3551 /* Write the remaining insns followed by the final copy. */
3553 for (insn = insns; insn; insn = next)
3555 next = NEXT_INSN (insn);
3557 add_insn (insn);
3560 last = emit_move_insn (target, result);
3561 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3562 != CODE_FOR_nothing)
3563 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3564 else
3566 /* Remove any existing REG_EQUAL note from "last", or else it will
3567 be mistaken for a note referring to the full contents of the
3568 libcall value when found together with the REG_RETVAL note added
3569 below. An existing note can come from an insn expansion at
3570 "last". */
3571 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3574 if (final_dest != target)
3575 emit_move_insn (final_dest, target);
3577 if (prev == 0)
3578 first = get_insns ();
3579 else
3580 first = NEXT_INSN (prev);
3582 maybe_encapsulate_block (first, last, equiv);
3585 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3586 PURPOSE describes how this comparison will be used. CODE is the rtx
3587 comparison code we will be using.
3589 ??? Actually, CODE is slightly weaker than that. A target is still
3590 required to implement all of the normal bcc operations, but not
3591 required to implement all (or any) of the unordered bcc operations. */
3594 can_compare_p (enum rtx_code code, enum machine_mode mode,
3595 enum can_compare_purpose purpose)
3599 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3601 if (purpose == ccp_jump)
3602 return bcc_gen_fctn[(int) code] != NULL;
3603 else if (purpose == ccp_store_flag)
3604 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3605 else
3606 /* There's only one cmov entry point, and it's allowed to fail. */
3607 return 1;
3609 if (purpose == ccp_jump
3610 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3611 return 1;
3612 if (purpose == ccp_cmov
3613 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3614 return 1;
3615 if (purpose == ccp_store_flag
3616 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3617 return 1;
3618 mode = GET_MODE_WIDER_MODE (mode);
3620 while (mode != VOIDmode);
3622 return 0;
3625 /* This function is called when we are going to emit a compare instruction that
3626 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3628 *PMODE is the mode of the inputs (in case they are const_int).
3629 *PUNSIGNEDP nonzero says that the operands are unsigned;
3630 this matters if they need to be widened.
3632 If they have mode BLKmode, then SIZE specifies the size of both operands.
3634 This function performs all the setup necessary so that the caller only has
3635 to emit a single comparison insn. This setup can involve doing a BLKmode
3636 comparison or emitting a library call to perform the comparison if no insn
3637 is available to handle it.
3638 The values which are passed in through pointers can be modified; the caller
3639 should perform the comparison on the modified values. Constant
3640 comparisons must have already been folded. */
3642 static void
3643 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3644 enum machine_mode *pmode, int *punsignedp,
3645 enum can_compare_purpose purpose)
3647 enum machine_mode mode = *pmode;
3648 rtx x = *px, y = *py;
3649 int unsignedp = *punsignedp;
3651 /* If we are inside an appropriately-short loop and we are optimizing,
3652 force expensive constants into a register. */
3653 if (CONSTANT_P (x) && optimize
3654 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3655 x = force_reg (mode, x);
3657 if (CONSTANT_P (y) && optimize
3658 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3659 y = force_reg (mode, y);
3661 #ifdef HAVE_cc0
3662 /* Make sure we have a canonical comparison. The RTL
3663 documentation states that canonical comparisons are required only
3664 for targets which have cc0. */
3665 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3666 #endif
3668 /* Don't let both operands fail to indicate the mode. */
3669 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3670 x = force_reg (mode, x);
3672 /* Handle all BLKmode compares. */
3674 if (mode == BLKmode)
3676 enum machine_mode cmp_mode, result_mode;
3677 enum insn_code cmp_code;
3678 tree length_type;
3679 rtx libfunc;
3680 rtx result;
3681 rtx opalign
3682 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3684 gcc_assert (size);
3686 /* Try to use a memory block compare insn - either cmpstr
3687 or cmpmem will do. */
3688 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3689 cmp_mode != VOIDmode;
3690 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3692 cmp_code = cmpmem_optab[cmp_mode];
3693 if (cmp_code == CODE_FOR_nothing)
3694 cmp_code = cmpstr_optab[cmp_mode];
3695 if (cmp_code == CODE_FOR_nothing)
3696 cmp_code = cmpstrn_optab[cmp_mode];
3697 if (cmp_code == CODE_FOR_nothing)
3698 continue;
3700 /* Must make sure the size fits the insn's mode. */
3701 if ((GET_CODE (size) == CONST_INT
3702 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3703 || (GET_MODE_BITSIZE (GET_MODE (size))
3704 > GET_MODE_BITSIZE (cmp_mode)))
3705 continue;
3707 result_mode = insn_data[cmp_code].operand[0].mode;
3708 result = gen_reg_rtx (result_mode);
3709 size = convert_to_mode (cmp_mode, size, 1);
3710 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3712 *px = result;
3713 *py = const0_rtx;
3714 *pmode = result_mode;
3715 return;
3718 /* Otherwise call a library function, memcmp. */
3719 libfunc = memcmp_libfunc;
3720 length_type = sizetype;
3721 result_mode = TYPE_MODE (integer_type_node);
3722 cmp_mode = TYPE_MODE (length_type);
3723 size = convert_to_mode (TYPE_MODE (length_type), size,
3724 TYPE_UNSIGNED (length_type));
3726 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3727 result_mode, 3,
3728 XEXP (x, 0), Pmode,
3729 XEXP (y, 0), Pmode,
3730 size, cmp_mode);
3731 *px = result;
3732 *py = const0_rtx;
3733 *pmode = result_mode;
3734 return;
3737 /* Don't allow operands to the compare to trap, as that can put the
3738 compare and branch in different basic blocks. */
3739 if (flag_non_call_exceptions)
3741 if (may_trap_p (x))
3742 x = force_reg (mode, x);
3743 if (may_trap_p (y))
3744 y = force_reg (mode, y);
3747 *px = x;
3748 *py = y;
3749 if (can_compare_p (*pcomparison, mode, purpose))
3750 return;
3752 /* Handle a lib call just for the mode we are using. */
3754 if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
3756 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3757 rtx result;
3759 /* If we want unsigned, and this mode has a distinct unsigned
3760 comparison routine, use that. */
3761 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3762 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3764 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3765 word_mode, 2, x, mode, y, mode);
3767 /* There are two kinds of comparison routines. Biased routines
3768 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3769 of gcc expect that the comparison operation is equivalent
3770 to the modified comparison. For signed comparisons compare the
3771 result against 1 in the biased case, and zero in the unbiased
3772 case. For unsigned comparisons always compare against 1 after
3773 biasing the unbiased result by adding 1. This gives us a way to
3774 represent LTU. */
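/* Illustrative example: a biased routine such as libgcc's __cmpdi2
   returns 0 when the first operand is less, 1 when the operands are
   equal and 2 when the first operand is greater, so comparing its
   result against 1 with the original rtx code reproduces the original
   comparison (result LT 1 exactly when op0 LT op1, and so on).  An
   unbiased routine returns -1/0/1; signed codes are checked against 0
   directly, while unsigned codes first bias the result by adding 1 so
   that an unsigned compare against 1 still works (e.g. for LTU).  */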
3775 *px = result;
3776 *pmode = word_mode;
3777 *py = const1_rtx;
3779 if (!TARGET_LIB_INT_CMP_BIASED)
3781 if (*punsignedp)
3782 *px = plus_constant (result, 1);
3783 else
3784 *py = const0_rtx;
3786 return;
3789 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3790 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3793 /* Before emitting an insn with code ICODE, make sure that X, which is going
3794 to be used for operand OPNUM of the insn, is converted from mode MODE to
3795 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3796 that it is accepted by the operand predicate. Return the new value. */
3798 static rtx
3799 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3800 enum machine_mode wider_mode, int unsignedp)
3802 if (mode != wider_mode)
3803 x = convert_modes (wider_mode, mode, x, unsignedp);
3805 if (!insn_data[icode].operand[opnum].predicate
3806 (x, insn_data[icode].operand[opnum].mode))
3808 if (no_new_pseudos)
3809 return NULL_RTX;
3810 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3813 return x;
3816 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3817 we can do the comparison.
3818 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3819 be NULL_RTX which indicates that only a comparison is to be generated. */
3821 static void
3822 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3823 enum rtx_code comparison, int unsignedp, rtx label)
3825 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3826 enum mode_class class = GET_MODE_CLASS (mode);
3827 enum machine_mode wider_mode = mode;
3829 /* Try combined insns first. */
3832 enum insn_code icode;
3833 PUT_MODE (test, wider_mode);
3835 if (label)
3837 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3839 if (icode != CODE_FOR_nothing
3840 && insn_data[icode].operand[0].predicate (test, wider_mode))
3842 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3843 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3844 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3845 return;
3849 /* Handle some compares against zero. */
3850 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3851 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3853 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3854 emit_insn (GEN_FCN (icode) (x));
3855 if (label)
3856 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3857 return;
3860 /* Handle compares for which there is a directly suitable insn. */
3862 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3863 if (icode != CODE_FOR_nothing)
3865 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3866 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3867 emit_insn (GEN_FCN (icode) (x, y));
3868 if (label)
3869 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3870 return;
3873 if (!CLASS_HAS_WIDER_MODES_P (class))
3874 break;
3876 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3878 while (wider_mode != VOIDmode);
3880 gcc_unreachable ();
3883 /* Generate code to compare X with Y so that the condition codes are
3884 set and to jump to LABEL if the condition is true. If X is a
3885 constant and Y is not a constant, then the comparison is swapped to
3886 ensure that the comparison RTL has the canonical form.
3888 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3889 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3890 the proper branch condition code.
3892 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3894 MODE is the mode of the inputs (in case they are const_int).
3896 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3897 be passed unchanged to emit_cmp_insn, then potentially converted into an
3898 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3900 void
3901 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3902 enum machine_mode mode, int unsignedp, rtx label)
3904 rtx op0 = x, op1 = y;
3906 /* Swap operands and condition to ensure canonical RTL. */
3907 if (swap_commutative_operands_p (x, y))
3909 /* If we're not emitting a branch, this means some caller
3910 is out of sync. */
3911 gcc_assert (label);
3913 op0 = y, op1 = x;
3914 comparison = swap_condition (comparison);
3917 #ifdef HAVE_cc0
3918 /* If OP0 is still a constant, then both X and Y must be constants.
3919 Force X into a register to create canonical RTL. */
3920 if (CONSTANT_P (op0))
3921 op0 = force_reg (mode, op0);
3922 #endif
3924 if (unsignedp)
3925 comparison = unsigned_condition (comparison);
3927 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3928 ccp_jump);
3929 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
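/* Illustrative sketch: a typical expansion that branches when two
   hypothetical SImode pseudos A and B compare equal, leaving it to this
   routine to widen the operands or fall back to a library call:

     rtx done = gen_label_rtx ();
     emit_cmp_and_jump_insns (a, b, EQ, NULL_RTX, SImode, 0, done);
     ... code executed only when A != B ...
     emit_label (done);  */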
3932 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3934 void
3935 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3936 enum machine_mode mode, int unsignedp)
3938 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3941 /* Emit a library call comparison between floating point X and Y.
3942 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3944 static void
3945 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3946 enum machine_mode *pmode, int *punsignedp)
3948 enum rtx_code comparison = *pcomparison;
3949 enum rtx_code swapped = swap_condition (comparison);
3950 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3951 rtx x = *px;
3952 rtx y = *py;
3953 enum machine_mode orig_mode = GET_MODE (x);
3954 enum machine_mode mode;
3955 rtx value, target, insns, equiv;
3956 rtx libfunc = 0;
3957 bool reversed_p = false;
3959 for (mode = orig_mode;
3960 mode != VOIDmode;
3961 mode = GET_MODE_WIDER_MODE (mode))
3963 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3964 break;
3966 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3968 rtx tmp;
3969 tmp = x; x = y; y = tmp;
3970 comparison = swapped;
3971 break;
3974 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3975 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3977 comparison = reversed;
3978 reversed_p = true;
3979 break;
3983 gcc_assert (mode != VOIDmode);
3985 if (mode != orig_mode)
3987 x = convert_to_mode (mode, x, 0);
3988 y = convert_to_mode (mode, y, 0);
3991 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3992 the RTL. This allows the RTL optimizers to delete the libcall if the
3993 condition can be determined at compile-time. */
3994 if (comparison == UNORDERED)
3996 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3997 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3998 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3999 temp, const_true_rtx, equiv);
4001 else
4003 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
4004 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4006 rtx true_rtx, false_rtx;
4008 switch (comparison)
4010 case EQ:
4011 true_rtx = const0_rtx;
4012 false_rtx = const_true_rtx;
4013 break;
4015 case NE:
4016 true_rtx = const_true_rtx;
4017 false_rtx = const0_rtx;
4018 break;
4020 case GT:
4021 true_rtx = const1_rtx;
4022 false_rtx = const0_rtx;
4023 break;
4025 case GE:
4026 true_rtx = const0_rtx;
4027 false_rtx = constm1_rtx;
4028 break;
4030 case LT:
4031 true_rtx = constm1_rtx;
4032 false_rtx = const0_rtx;
4033 break;
4035 case LE:
4036 true_rtx = const0_rtx;
4037 false_rtx = const1_rtx;
4038 break;
4040 default:
4041 gcc_unreachable ();
4043 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4044 equiv, true_rtx, false_rtx);
4048 start_sequence ();
4049 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4050 word_mode, 2, x, mode, y, mode);
4051 insns = get_insns ();
4052 end_sequence ();
4054 target = gen_reg_rtx (word_mode);
4055 emit_libcall_block (insns, target, value, equiv);
4057 if (comparison == UNORDERED
4058 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4059 comparison = reversed_p ? EQ : NE;
4061 *px = target;
4062 *py = const0_rtx;
4063 *pmode = word_mode;
4064 *pcomparison = comparison;
4065 *punsignedp = 0;
4068 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4070 void
4071 emit_indirect_jump (rtx loc)
4073 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4074 (loc, Pmode))
4075 loc = copy_to_mode_reg (Pmode, loc);
4077 emit_jump_insn (gen_indirect_jump (loc));
4078 emit_barrier ();
4081 #ifdef HAVE_conditional_move
4083 /* Emit a conditional move instruction if the machine supports one for that
4084 condition and machine mode.
4086 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4087 the mode to use should they be constants. If it is VOIDmode, they cannot
4088 both be constants.
4090 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4091 should be stored there. MODE is the mode to use should they be constants.
4092 If it is VOIDmode, they cannot both be constants.
4094 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4095 is not supported. */
4098 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4099 enum machine_mode cmode, rtx op2, rtx op3,
4100 enum machine_mode mode, int unsignedp)
4102 rtx tem, subtarget, comparison, insn;
4103 enum insn_code icode;
4104 enum rtx_code reversed;
4106 /* If one operand is constant, make it the second one. Only do this
4107 if the other operand is not constant as well. */
4109 if (swap_commutative_operands_p (op0, op1))
4111 tem = op0;
4112 op0 = op1;
4113 op1 = tem;
4114 code = swap_condition (code);
4117 /* get_condition will prefer to generate LT and GT even if the old
4118 comparison was against zero, so undo that canonicalization here since
4119 comparisons against zero are cheaper. */
4120 if (code == LT && op1 == const1_rtx)
4121 code = LE, op1 = const0_rtx;
4122 else if (code == GT && op1 == constm1_rtx)
4123 code = GE, op1 = const0_rtx;
4125 if (cmode == VOIDmode)
4126 cmode = GET_MODE (op0);
4128 if (swap_commutative_operands_p (op2, op3)
4129 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4130 != UNKNOWN))
4132 tem = op2;
4133 op2 = op3;
4134 op3 = tem;
4135 code = reversed;
4138 if (mode == VOIDmode)
4139 mode = GET_MODE (op2);
4141 icode = movcc_gen_code[mode];
4143 if (icode == CODE_FOR_nothing)
4144 return 0;
4146 if (!target)
4147 target = gen_reg_rtx (mode);
4149 subtarget = target;
4151 /* If the insn doesn't accept these operands, put them in pseudos. */
4153 if (!insn_data[icode].operand[0].predicate
4154 (subtarget, insn_data[icode].operand[0].mode))
4155 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4157 if (!insn_data[icode].operand[2].predicate
4158 (op2, insn_data[icode].operand[2].mode))
4159 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4161 if (!insn_data[icode].operand[3].predicate
4162 (op3, insn_data[icode].operand[3].mode))
4163 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4165 /* Everything should now be in suitable form, so emit the compare insn
4166 and then the conditional move. */
4168 comparison
4169 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4171 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4172 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4173 return NULL and let the caller figure out how best to deal with this
4174 situation. */
4175 if (GET_CODE (comparison) != code)
4176 return NULL_RTX;
4178 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4180 /* If that failed, then give up. */
4181 if (insn == 0)
4182 return 0;
4184 emit_insn (insn);
4186 if (subtarget != target)
4187 convert_move (target, subtarget, 0);
4189 return target;
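/* Illustrative sketch: expanding something like "x = (a < b) ? c : d"
   with hypothetical SImode pseudos, falling back to an explicit
   compare-and-branch sequence when no movcc pattern is available:

     rtx res = emit_conditional_move (target, LT, a, b, SImode,
                                      c, d, SImode, 0);
     if (!res)
       ... emit a branch-based sequence instead ...  */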
4192 /* Return nonzero if a conditional move of mode MODE is supported.
4194 This function is for combine so it can tell whether an insn that looks
4195 like a conditional move is actually supported by the hardware. If we
4196 guess wrong we lose a bit on optimization, but that's it. */
4197 /* ??? sparc64 supports conditionally moving integer values based on fp
4198 comparisons, and vice versa. How do we handle them? */
4201 can_conditionally_move_p (enum machine_mode mode)
4203 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4204 return 1;
4206 return 0;
4209 #endif /* HAVE_conditional_move */
4211 /* Emit a conditional addition instruction if the machine supports one for that
4212 condition and machine mode.
4214 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4215 the mode to use should they be constants. If it is VOIDmode, they cannot
4216 both be constants.
4218 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4219 should be stored there. MODE is the mode to use should they be constants.
4220 If it is VOIDmode, they cannot both be constants.
4222 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4223 is not supported. */
4226 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4227 enum machine_mode cmode, rtx op2, rtx op3,
4228 enum machine_mode mode, int unsignedp)
4230 rtx tem, subtarget, comparison, insn;
4231 enum insn_code icode;
4232 enum rtx_code reversed;
4234 /* If one operand is constant, make it the second one. Only do this
4235 if the other operand is not constant as well. */
4237 if (swap_commutative_operands_p (op0, op1))
4239 tem = op0;
4240 op0 = op1;
4241 op1 = tem;
4242 code = swap_condition (code);
4245 /* get_condition will prefer to generate LT and GT even if the old
4246 comparison was against zero, so undo that canonicalization here since
4247 comparisons against zero are cheaper. */
4248 if (code == LT && op1 == const1_rtx)
4249 code = LE, op1 = const0_rtx;
4250 else if (code == GT && op1 == constm1_rtx)
4251 code = GE, op1 = const0_rtx;
4253 if (cmode == VOIDmode)
4254 cmode = GET_MODE (op0);
4256 if (swap_commutative_operands_p (op2, op3)
4257 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4258 != UNKNOWN))
4260 tem = op2;
4261 op2 = op3;
4262 op3 = tem;
4263 code = reversed;
4266 if (mode == VOIDmode)
4267 mode = GET_MODE (op2);
4269 icode = addcc_optab->handlers[(int) mode].insn_code;
4271 if (icode == CODE_FOR_nothing)
4272 return 0;
4274 if (!target)
4275 target = gen_reg_rtx (mode);
4277 /* If the insn doesn't accept these operands, put them in pseudos. */
4279 if (!insn_data[icode].operand[0].predicate
4280 (target, insn_data[icode].operand[0].mode))
4281 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4282 else
4283 subtarget = target;
4285 if (!insn_data[icode].operand[2].predicate
4286 (op2, insn_data[icode].operand[2].mode))
4287 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4289 if (!insn_data[icode].operand[3].predicate
4290 (op3, insn_data[icode].operand[3].mode))
4291 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4293 /* Everything should now be in suitable form, so emit the compare insn
4294 and then the conditional add. */
4296 comparison
4297 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4299 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4300 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4301 return NULL and let the caller figure out how best to deal with this
4302 situation. */
4303 if (GET_CODE (comparison) != code)
4304 return NULL_RTX;
4306 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4308 /* If that failed, then give up. */
4309 if (insn == 0)
4310 return 0;
4312 emit_insn (insn);
4314 if (subtarget != target)
4315 convert_move (target, subtarget, 0);
4317 return target;
4320 /* These functions attempt to generate an insn body, rather than
4321 emitting the insn, but if the gen function already emits them, we
4322 make no attempt to turn them back into naked patterns. */
4324 /* Generate and return an insn body to add Y to X. */
4327 gen_add2_insn (rtx x, rtx y)
4329 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4331 gcc_assert (insn_data[icode].operand[0].predicate
4332 (x, insn_data[icode].operand[0].mode));
4333 gcc_assert (insn_data[icode].operand[1].predicate
4334 (x, insn_data[icode].operand[1].mode));
4335 gcc_assert (insn_data[icode].operand[2].predicate
4336 (y, insn_data[icode].operand[2].mode));
4338 return GEN_FCN (icode) (x, x, y);
4341 /* Generate and return an insn body to add r1 and c,
4342 storing the result in r0. */
4344 gen_add3_insn (rtx r0, rtx r1, rtx c)
4346 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4348 if (icode == CODE_FOR_nothing
4349 || !(insn_data[icode].operand[0].predicate
4350 (r0, insn_data[icode].operand[0].mode))
4351 || !(insn_data[icode].operand[1].predicate
4352 (r1, insn_data[icode].operand[1].mode))
4353 || !(insn_data[icode].operand[2].predicate
4354 (c, insn_data[icode].operand[2].mode)))
4355 return NULL_RTX;
4357 return GEN_FCN (icode) (r0, r1, c);
4361 have_add2_insn (rtx x, rtx y)
4363 int icode;
4365 gcc_assert (GET_MODE (x) != VOIDmode);
4367 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4369 if (icode == CODE_FOR_nothing)
4370 return 0;
4372 if (!(insn_data[icode].operand[0].predicate
4373 (x, insn_data[icode].operand[0].mode))
4374 || !(insn_data[icode].operand[1].predicate
4375 (x, insn_data[icode].operand[1].mode))
4376 || !(insn_data[icode].operand[2].predicate
4377 (y, insn_data[icode].operand[2].mode)))
4378 return 0;
4380 return 1;
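/* Illustrative sketch: callers that want a bare add pattern usually guard
   the gen_* call with the matching have_* predicate (REG being a
   hypothetical pseudo):

     if (have_add2_insn (reg, GEN_INT (4)))
       emit_insn (gen_add2_insn (reg, GEN_INT (4)));

   gen_add3_insn instead returns NULL_RTX itself when the operands are
   not accepted, so its result must be checked before emitting.  */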
4383 /* Generate and return an insn body to subtract Y from X. */
4386 gen_sub2_insn (rtx x, rtx y)
4388 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4390 gcc_assert (insn_data[icode].operand[0].predicate
4391 (x, insn_data[icode].operand[0].mode));
4392 gcc_assert (insn_data[icode].operand[1].predicate
4393 (x, insn_data[icode].operand[1].mode));
4394 gcc_assert (insn_data[icode].operand[2].predicate
4395 (y, insn_data[icode].operand[2].mode));
4397 return GEN_FCN (icode) (x, x, y);
4400 /* Generate and return an insn body to subtract c from r1,
4401 storing the result in r0. */
4403 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4405 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4407 if (icode == CODE_FOR_nothing
4408 || !(insn_data[icode].operand[0].predicate
4409 (r0, insn_data[icode].operand[0].mode))
4410 || !(insn_data[icode].operand[1].predicate
4411 (r1, insn_data[icode].operand[1].mode))
4412 || !(insn_data[icode].operand[2].predicate
4413 (c, insn_data[icode].operand[2].mode)))
4414 return NULL_RTX;
4416 return GEN_FCN (icode) (r0, r1, c);
4420 have_sub2_insn (rtx x, rtx y)
4422 int icode;
4424 gcc_assert (GET_MODE (x) != VOIDmode);
4426 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4428 if (icode == CODE_FOR_nothing)
4429 return 0;
4431 if (!(insn_data[icode].operand[0].predicate
4432 (x, insn_data[icode].operand[0].mode))
4433 || !(insn_data[icode].operand[1].predicate
4434 (x, insn_data[icode].operand[1].mode))
4435 || !(insn_data[icode].operand[2].predicate
4436 (y, insn_data[icode].operand[2].mode)))
4437 return 0;
4439 return 1;
4442 /* Generate the body of an instruction to copy Y into X.
4443 It may be a list of insns, if one insn isn't enough. */
4446 gen_move_insn (rtx x, rtx y)
4448 rtx seq;
4450 start_sequence ();
4451 emit_move_insn_1 (x, y);
4452 seq = get_insns ();
4453 end_sequence ();
4454 return seq;
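/* Illustrative sketch: since the result may be a whole sequence rather
   than a single insn, callers normally hand it straight to emit_insn,
   with DEST and SRC being hypothetical rtx operands of the same mode:

     emit_insn (gen_move_insn (dest, src));  */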
4457 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4458 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4459 no such operation exists, CODE_FOR_nothing will be returned. */
4461 enum insn_code
4462 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4463 int unsignedp)
4465 convert_optab tab;
4466 #ifdef HAVE_ptr_extend
4467 if (unsignedp < 0)
4468 return CODE_FOR_ptr_extend;
4469 #endif
4471 tab = unsignedp ? zext_optab : sext_optab;
4472 return tab->handlers[to_mode][from_mode].insn_code;
4475 /* Generate the body of an insn to extend Y (with mode MFROM)
4476 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4479 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4480 enum machine_mode mfrom, int unsignedp)
4482 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4483 return GEN_FCN (icode) (x, y);
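/* Illustrative sketch: zero-extending a hypothetical QImode pseudo into
   an SImode pseudo, after checking that the target has such a pattern:

     if (can_extend_p (SImode, QImode, 1) != CODE_FOR_nothing)
       emit_insn (gen_extend_insn (si_reg, qi_reg, SImode, QImode, 1));  */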
4486 /* can_fix_p and can_float_p say whether the target machine
4487 can directly convert a given fixed point type to
4488 a given floating point type, or vice versa.
4489 The returned value is the CODE_FOR_... value to use,
4490 or CODE_FOR_nothing if these modes cannot be directly converted.
4492 *TRUNCP_PTR is set to 1 if it is necessary to output
4493 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4495 static enum insn_code
4496 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4497 int unsignedp, int *truncp_ptr)
4499 convert_optab tab;
4500 enum insn_code icode;
4502 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4503 icode = tab->handlers[fixmode][fltmode].insn_code;
4504 if (icode != CODE_FOR_nothing)
4506 *truncp_ptr = 0;
4507 return icode;
4510 /* FIXME: This requires a port to define both FIX and FTRUNC patterns
4511 for this to work. We need to rework the fix* and ftrunc* patterns
4512 and documentation. */
4513 tab = unsignedp ? ufix_optab : sfix_optab;
4514 icode = tab->handlers[fixmode][fltmode].insn_code;
4515 if (icode != CODE_FOR_nothing
4516 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4518 *truncp_ptr = 1;
4519 return icode;
4522 *truncp_ptr = 0;
4523 return CODE_FOR_nothing;
4526 static enum insn_code
4527 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4528 int unsignedp)
4530 convert_optab tab;
4532 tab = unsignedp ? ufloat_optab : sfloat_optab;
4533 return tab->handlers[fltmode][fixmode].insn_code;
4536 /* Generate code to convert FROM to floating point
4537 and store in TO. FROM must be fixed point and not VOIDmode.
4538 UNSIGNEDP nonzero means regard FROM as unsigned.
4539 Normally this is done by correcting the final value
4540 if it is negative. */
4542 void
4543 expand_float (rtx to, rtx from, int unsignedp)
4545 enum insn_code icode;
4546 rtx target = to;
4547 enum machine_mode fmode, imode;
4548 bool can_do_signed = false;
4550 /* Crash now, because we won't be able to decide which mode to use. */
4551 gcc_assert (GET_MODE (from) != VOIDmode);
4553 /* Look for an insn to do the conversion. Do it in the specified
4554 modes if possible; otherwise convert either input, output or both to
4555 wider mode. If the integer mode is wider than the mode of FROM,
4556 we can do the conversion signed even if the input is unsigned. */
4558 for (fmode = GET_MODE (to); fmode != VOIDmode;
4559 fmode = GET_MODE_WIDER_MODE (fmode))
4560 for (imode = GET_MODE (from); imode != VOIDmode;
4561 imode = GET_MODE_WIDER_MODE (imode))
4563 int doing_unsigned = unsignedp;
4565 if (fmode != GET_MODE (to)
4566 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4567 continue;
4569 icode = can_float_p (fmode, imode, unsignedp);
4570 if (icode == CODE_FOR_nothing && unsignedp)
4572 enum insn_code scode = can_float_p (fmode, imode, 0);
4573 if (scode != CODE_FOR_nothing)
4574 can_do_signed = true;
4575 if (imode != GET_MODE (from))
4576 icode = scode, doing_unsigned = 0;
4579 if (icode != CODE_FOR_nothing)
4581 if (imode != GET_MODE (from))
4582 from = convert_to_mode (imode, from, unsignedp);
4584 if (fmode != GET_MODE (to))
4585 target = gen_reg_rtx (fmode);
4587 emit_unop_insn (icode, target, from,
4588 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4590 if (target != to)
4591 convert_move (to, target, 0);
4592 return;
4596 /* Unsigned integer, and no way to convert directly. For binary
4597 floating point modes, convert as signed, then conditionally adjust
4598 the result. */
4599 if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
4601 rtx label = gen_label_rtx ();
4602 rtx temp;
4603 REAL_VALUE_TYPE offset;
4605 /* Look for a usable floating mode FMODE wider than the source and at
4606 least as wide as the target. Using FMODE will avoid rounding woes
4607 with unsigned values greater than the signed maximum value. */
4609 for (fmode = GET_MODE (to); fmode != VOIDmode;
4610 fmode = GET_MODE_WIDER_MODE (fmode))
4611 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4612 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4613 break;
4615 if (fmode == VOIDmode)
4617 /* There is no such mode. Pretend the target is wide enough. */
4618 fmode = GET_MODE (to);
4620 /* Avoid double-rounding when TO is narrower than FROM. */
4621 if ((significand_size (fmode) + 1)
4622 < GET_MODE_BITSIZE (GET_MODE (from)))
4624 rtx temp1;
4625 rtx neglabel = gen_label_rtx ();
4627 /* Don't use TARGET if it isn't a register, is a hard register,
4628 or is the wrong mode. */
4629 if (!REG_P (target)
4630 || REGNO (target) < FIRST_PSEUDO_REGISTER
4631 || GET_MODE (target) != fmode)
4632 target = gen_reg_rtx (fmode);
4634 imode = GET_MODE (from);
4635 do_pending_stack_adjust ();
4637 /* Test whether the sign bit is set. */
4638 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4639 0, neglabel);
4641 /* The sign bit is not set. Convert as signed. */
4642 expand_float (target, from, 0);
4643 emit_jump_insn (gen_jump (label));
4644 emit_barrier ();
4646 /* The sign bit is set.
4647 Convert to a usable (positive signed) value by shifting right
4648 one bit, while remembering if a nonzero bit was shifted
4649 out; i.e., compute (from & 1) | (from >> 1). */
4651 emit_label (neglabel);
4652 temp = expand_binop (imode, and_optab, from, const1_rtx,
4653 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4654 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4655 NULL_RTX, 1);
4656 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4657 OPTAB_LIB_WIDEN);
4658 expand_float (target, temp, 0);
4660 /* Multiply by 2 to undo the shift above. */
4661 temp = expand_binop (fmode, add_optab, target, target,
4662 target, 0, OPTAB_LIB_WIDEN);
4663 if (temp != target)
4664 emit_move_insn (target, temp);
4666 do_pending_stack_adjust ();
4667 emit_label (label);
4668 goto done;
4672 /* If we are about to do some arithmetic to correct for an
4673 unsigned operand, do it in a pseudo-register. */
4675 if (GET_MODE (to) != fmode
4676 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4677 target = gen_reg_rtx (fmode);
4679 /* Convert as signed integer to floating. */
4680 expand_float (target, from, 0);
4682 /* If FROM is negative (and therefore TO is negative),
4683 correct its value by 2**bitwidth. */
4685 do_pending_stack_adjust ();
4686 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4687 0, label);
4690 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4691 temp = expand_binop (fmode, add_optab, target,
4692 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4693 target, 0, OPTAB_LIB_WIDEN);
4694 if (temp != target)
4695 emit_move_insn (target, temp);
4697 do_pending_stack_adjust ();
4698 emit_label (label);
4699 goto done;
4702 /* No hardware instruction available; call a library routine. */
4704 rtx libfunc;
4705 rtx insns;
4706 rtx value;
4707 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4709 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4710 from = convert_to_mode (SImode, from, unsignedp);
4712 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4713 gcc_assert (libfunc);
4715 start_sequence ();
4717 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4718 GET_MODE (to), 1, from,
4719 GET_MODE (from));
4720 insns = get_insns ();
4721 end_sequence ();
4723 emit_libcall_block (insns, target, value,
4724 gen_rtx_FLOAT (GET_MODE (to), from));
4727 done:
4729 /* Copy result to requested destination
4730 if we have been computing in a temp location. */
4732 if (target != to)
4734 if (GET_MODE (target) == GET_MODE (to))
4735 emit_move_insn (to, target);
4736 else
4737 convert_move (to, target, 0);
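/* Illustrative sketch: converting a hypothetical unsigned SImode pseudo U
   to a DFmode pseudo; the routine chooses between a float pattern, the
   signed-conversion-plus-fixup path above, and a library call on its own:

     rtx d = gen_reg_rtx (DFmode);
     expand_float (d, u, 1);  */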
4741 /* Generate code to convert FROM to fixed point and store in TO. FROM
4742 must be floating point. */
4744 void
4745 expand_fix (rtx to, rtx from, int unsignedp)
4747 enum insn_code icode;
4748 rtx target = to;
4749 enum machine_mode fmode, imode;
4750 int must_trunc = 0;
4752 /* We first try to find a pair of modes, one real and one integer, at
4753 least as wide as FROM and TO, respectively, in which we can open-code
4754 this conversion. If the integer mode is wider than the mode of TO,
4755 we can do the conversion either signed or unsigned. */
4757 for (fmode = GET_MODE (from); fmode != VOIDmode;
4758 fmode = GET_MODE_WIDER_MODE (fmode))
4759 for (imode = GET_MODE (to); imode != VOIDmode;
4760 imode = GET_MODE_WIDER_MODE (imode))
4762 int doing_unsigned = unsignedp;
4764 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4765 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4766 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4768 if (icode != CODE_FOR_nothing)
4770 if (fmode != GET_MODE (from))
4771 from = convert_to_mode (fmode, from, 0);
4773 if (must_trunc)
4775 rtx temp = gen_reg_rtx (GET_MODE (from));
4776 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4777 temp, 0);
4780 if (imode != GET_MODE (to))
4781 target = gen_reg_rtx (imode);
4783 emit_unop_insn (icode, target, from,
4784 doing_unsigned ? UNSIGNED_FIX : FIX);
4785 if (target != to)
4786 convert_move (to, target, unsignedp);
4787 return;
4791 /* For an unsigned conversion, there is one more way to do it.
4792 If we have a signed conversion, we generate code that compares
4793 the real value to the largest representable positive number. If it
4794 is smaller, the conversion is done normally. Otherwise, subtract
4795 one plus the highest signed number, convert, and add it back.
4797 We only need to check all real modes, since we know we didn't find
4798 anything with a wider integer mode.
4800 This code used to extend the FP value into a mode wider than the
4801 destination. This is not needed. Consider, for instance, conversion
4802 from SFmode into DImode.
4804 The hot path through the code deals with inputs smaller than 2^63
4805 and does just the conversion, so there are no bits to lose.
4807 On the other path we know the value is positive and in the range
4808 2^63..2^64-1 inclusive (for any other input, overflow happens and the
4809 result is undefined), so the most significant bit set in the mantissa
4810 corresponds to 2^63. The subtraction of 2^63 should not generate any
4811 rounding, as it simply clears out that bit. The rest is trivial. */
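/* Worked example (illustrative): fixing the DFmode value 3*2^62
   (= 2^63 + 2^62) to unsigned DImode.  It is >= 2^63, so we subtract
   2^63 exactly, leaving 2^62; the signed fix of 2^62 is exact; and
   XORing the result with 1 << 63 restores the final unsigned value
   2^63 + 2^62.  */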
4813 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4814 for (fmode = GET_MODE (from); fmode != VOIDmode;
4815 fmode = GET_MODE_WIDER_MODE (fmode))
4816 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4817 &must_trunc))
4819 int bitsize;
4820 REAL_VALUE_TYPE offset;
4821 rtx limit, lab1, lab2, insn;
4823 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4824 real_2expN (&offset, bitsize - 1);
4825 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4826 lab1 = gen_label_rtx ();
4827 lab2 = gen_label_rtx ();
4829 if (fmode != GET_MODE (from))
4830 from = convert_to_mode (fmode, from, 0);
4832 /* See if we need to do the subtraction. */
4833 do_pending_stack_adjust ();
4834 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4835 0, lab1);
4837 /* If not, do the signed "fix" and branch around fixup code. */
4838 expand_fix (to, from, 0);
4839 emit_jump_insn (gen_jump (lab2));
4840 emit_barrier ();
4842 /* Otherwise, subtract 2**(N-1), convert to signed number,
4843 then add 2**(N-1). Do the addition using XOR since this
4844 will often generate better code. */
4845 emit_label (lab1);
4846 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4847 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4848 expand_fix (to, target, 0);
4849 target = expand_binop (GET_MODE (to), xor_optab, to,
4850 gen_int_mode
4851 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4852 GET_MODE (to)),
4853 to, 1, OPTAB_LIB_WIDEN);
4855 if (target != to)
4856 emit_move_insn (to, target);
4858 emit_label (lab2);
4860 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4861 != CODE_FOR_nothing)
4863 /* Make a place for a REG_NOTE and add it. */
4864 insn = emit_move_insn (to, to);
4865 set_unique_reg_note (insn,
4866 REG_EQUAL,
4867 gen_rtx_fmt_e (UNSIGNED_FIX,
4868 GET_MODE (to),
4869 copy_rtx (from)));
4872 return;
4875 /* We can't do it with an insn, so use a library call. But first ensure
4876 that the mode of TO is at least as wide as SImode, since those are the
4877 only library calls we know about. */
4879 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4881 target = gen_reg_rtx (SImode);
4883 expand_fix (target, from, unsignedp);
4885 else
4887 rtx insns;
4888 rtx value;
4889 rtx libfunc;
4891 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4892 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4893 gcc_assert (libfunc);
4895 start_sequence ();
4897 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4898 GET_MODE (to), 1, from,
4899 GET_MODE (from));
4900 insns = get_insns ();
4901 end_sequence ();
4903 emit_libcall_block (insns, target, value,
4904 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4905 GET_MODE (to), from));
4908 if (target != to)
4910 if (GET_MODE (to) == GET_MODE (target))
4911 emit_move_insn (to, target);
4912 else
4913 convert_move (to, target, 0);
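/* Illustrative sketch: truncating a hypothetical DFmode pseudo F to an
   unsigned DImode value; expand_fix picks a fixuns pattern, the
   compare-and-adjust sequence above, or a library call as appropriate:

     rtx i = gen_reg_rtx (DImode);
     expand_fix (i, f, 1);  */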
4917 /* Generate code to convert FROM to fixed point and store in TO. FROM
4918 must be floating point, TO must be signed. Use the conversion optab
4919 TAB to do the conversion. */
4921 bool
4922 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
4924 enum insn_code icode;
4925 rtx target = to;
4926 enum machine_mode fmode, imode;
4928 /* We first try to find a pair of modes, one real and one integer, at
4929 least as wide as FROM and TO, respectively, in which we can open-code
4930 this conversion. If the integer mode is wider than the mode of TO,
4931 we can do the conversion either signed or unsigned. */
4933 for (fmode = GET_MODE (from); fmode != VOIDmode;
4934 fmode = GET_MODE_WIDER_MODE (fmode))
4935 for (imode = GET_MODE (to); imode != VOIDmode;
4936 imode = GET_MODE_WIDER_MODE (imode))
4938 icode = tab->handlers[imode][fmode].insn_code;
4939 if (icode != CODE_FOR_nothing)
4941 if (fmode != GET_MODE (from))
4942 from = convert_to_mode (fmode, from, 0);
4944 if (imode != GET_MODE (to))
4945 target = gen_reg_rtx (imode);
4947 emit_unop_insn (icode, target, from, UNKNOWN);
4948 if (target != to)
4949 convert_move (to, target, 0);
4950 return true;
4954 return false;
4957 /* Report whether we have an instruction to perform the operation
4958 specified by CODE on operands of mode MODE. */
4960 have_insn_for (enum rtx_code code, enum machine_mode mode)
4962 return (code_to_optab[(int) code] != 0
4963 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4964 != CODE_FOR_nothing));
4967 /* Create a blank optab. */
4968 static optab
4969 new_optab (void)
4971 int i;
4972 optab op = ggc_alloc (sizeof (struct optab));
4973 for (i = 0; i < NUM_MACHINE_MODES; i++)
4975 op->handlers[i].insn_code = CODE_FOR_nothing;
4976 op->handlers[i].libfunc = 0;
4979 return op;
4982 static convert_optab
4983 new_convert_optab (void)
4985 int i, j;
4986 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4987 for (i = 0; i < NUM_MACHINE_MODES; i++)
4988 for (j = 0; j < NUM_MACHINE_MODES; j++)
4990 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4991 op->handlers[i][j].libfunc = 0;
4993 return op;
4996 /* Same, but fill in its code as CODE, and write it into the
4997 code_to_optab table. */
4998 static inline optab
4999 init_optab (enum rtx_code code)
5001 optab op = new_optab ();
5002 op->code = code;
5003 code_to_optab[(int) code] = op;
5004 return op;
5007 /* Same, but fill in its code as CODE, and do _not_ write it into
5008 the code_to_optab table. */
5009 static inline optab
5010 init_optabv (enum rtx_code code)
5012 optab op = new_optab ();
5013 op->code = code;
5014 return op;
5017 /* Conversion optabs never go in the code_to_optab table. */
5018 static inline convert_optab
5019 init_convert_optab (enum rtx_code code)
5021 convert_optab op = new_convert_optab ();
5022 op->code = code;
5023 return op;
5026 /* Initialize the libfunc fields of an entire group of entries in some
5027 optab. Each entry is set equal to a string consisting of a leading
5028 pair of underscores followed by a generic operation name followed by
5029 a mode name (downshifted to lowercase) followed by a single character
5030 representing the number of operands for the given operation (which is
5031 usually one of the characters '2', '3', or '4').
5033 OPTABLE is the table in which libfunc fields are to be initialized.
5034 FIRST_MODE is the first machine mode index in the given optab to
5035 initialize.
5036 LAST_MODE is the last machine mode index in the given optab to
5037 initialize.
5038 OPNAME is the generic (string) name of the operation.
5039 SUFFIX is the character which specifies the number of operands for
5040 the given generic operation.
5043 static void
5044 init_libfuncs (optab optable, int first_mode, int last_mode,
5045 const char *opname, int suffix)
5047 int mode;
5048 unsigned opname_len = strlen (opname);
5050 for (mode = first_mode; (int) mode <= (int) last_mode;
5051 mode = (enum machine_mode) ((int) mode + 1))
5053 const char *mname = GET_MODE_NAME (mode);
5054 unsigned mname_len = strlen (mname);
5055 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
5056 char *p;
5057 const char *q;
5059 p = libfunc_name;
5060 *p++ = '_';
5061 *p++ = '_';
5062 for (q = opname; *q; )
5063 *p++ = *q++;
5064 for (q = mname; *q; q++)
5065 *p++ = TOLOWER (*q);
5066 *p++ = suffix;
5067 *p = '\0';
5069 optable->handlers[(int) mode].libfunc
5070 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
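/* For example, calling init_libfuncs for add_optab over the binary float
   modes with OPNAME "add" and SUFFIX '3' produces names such as
   "__addsf3" and "__adddf3", the libgcc routines that add two SFmode or
   DFmode values.  */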
5074 /* Initialize the libfunc fields of an entire group of entries in some
5075 optab which correspond to all integer mode operations. The parameters
5076 have the same meaning as similarly named ones for the `init_libfuncs'
5077 routine. (See above). */
5079 static void
5080 init_integral_libfuncs (optab optable, const char *opname, int suffix)
5082 int maxsize = 2*BITS_PER_WORD;
5083 if (maxsize < LONG_LONG_TYPE_SIZE)
5084 maxsize = LONG_LONG_TYPE_SIZE;
5085 init_libfuncs (optable, word_mode,
5086 mode_for_size (maxsize, MODE_INT, 0),
5087 opname, suffix);
5090 /* Initialize the libfunc fields of an entire group of entries in some
5091 optab which correspond to all real mode operations. The parameters
5092 have the same meaning as similarly named ones for the `init_libfuncs'
5093 routine. (See above). */
5095 static void
5096 init_floating_libfuncs (optab optable, const char *opname, int suffix)
5098 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
5099 init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
5100 opname, suffix);
5103 /* Initialize the libfunc fields of an entire group of entries of an
5104 inter-mode-class conversion optab. The string formation rules are
5105 similar to the ones for init_libfuncs, above, but instead of having
5106 a mode name and an operand count these functions have two mode names
5107 and no operand count. */
5108 static void
5109 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
5110 enum mode_class from_class,
5111 enum mode_class to_class)
5113 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
5114 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
5115 size_t opname_len = strlen (opname);
5116 size_t max_mname_len = 0;
5118 enum machine_mode fmode, tmode;
5119 const char *fname, *tname;
5120 const char *q;
5121 char *libfunc_name, *suffix;
5122 char *p;
5124 for (fmode = first_from_mode;
5125 fmode != VOIDmode;
5126 fmode = GET_MODE_WIDER_MODE (fmode))
5127 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
5129 for (tmode = first_to_mode;
5130 tmode != VOIDmode;
5131 tmode = GET_MODE_WIDER_MODE (tmode))
5132 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
5134 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5135 libfunc_name[0] = '_';
5136 libfunc_name[1] = '_';
5137 memcpy (&libfunc_name[2], opname, opname_len);
5138 suffix = libfunc_name + opname_len + 2;
5140 for (fmode = first_from_mode; fmode != VOIDmode;
5141 fmode = GET_MODE_WIDER_MODE (fmode))
5142 for (tmode = first_to_mode; tmode != VOIDmode;
5143 tmode = GET_MODE_WIDER_MODE (tmode))
5145 fname = GET_MODE_NAME (fmode);
5146 tname = GET_MODE_NAME (tmode);
5148 p = suffix;
5149 for (q = fname; *q; p++, q++)
5150 *p = TOLOWER (*q);
5151 for (q = tname; *q; p++, q++)
5152 *p = TOLOWER (*q);
5154 *p = '\0';
5156 tab->handlers[tmode][fmode].libfunc
5157 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5158 p - libfunc_name));
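/* For example, the interclass rules yield names such as "__floatsidf"
   (SImode integer to DFmode float, from sfloat_optab with OPNAME
   "float") and "__fixdfsi" (DFmode float to SImode integer, from
   sfix_optab with OPNAME "fix").  */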
5162 /* Initialize the libfunc fields of an entire group of entries of an
5163 intra-mode-class conversion optab. The string formation rules are
5164 similar to the ones for init_libfuncs, above. WIDENING says whether
5165 the optab goes from narrow to wide modes or vice versa. These functions
5166 have two mode names _and_ an operand count. */
5167 static void
5168 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
5169 enum mode_class class, bool widening)
5171 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
5172 size_t opname_len = strlen (opname);
5173 size_t max_mname_len = 0;
5175 enum machine_mode nmode, wmode;
5176 const char *nname, *wname;
5177 const char *q;
5178 char *libfunc_name, *suffix;
5179 char *p;
5181 for (nmode = first_mode; nmode != VOIDmode;
5182 nmode = GET_MODE_WIDER_MODE (nmode))
5183 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
5185 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5186 libfunc_name[0] = '_';
5187 libfunc_name[1] = '_';
5188 memcpy (&libfunc_name[2], opname, opname_len);
5189 suffix = libfunc_name + opname_len + 2;
5191 for (nmode = first_mode; nmode != VOIDmode;
5192 nmode = GET_MODE_WIDER_MODE (nmode))
5193 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
5194 wmode = GET_MODE_WIDER_MODE (wmode))
5196 nname = GET_MODE_NAME (nmode);
5197 wname = GET_MODE_NAME (wmode);
5199 p = suffix;
5200 for (q = widening ? nname : wname; *q; p++, q++)
5201 *p = TOLOWER (*q);
5202 for (q = widening ? wname : nname; *q; p++, q++)
5203 *p = TOLOWER (*q);
5205 *p++ = '2';
5206 *p = '\0';
5208 tab->handlers[widening ? wmode : nmode]
5209 [widening ? nmode : wmode].libfunc
5210 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5211 p - libfunc_name));
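/* For example, with WIDENING true the extension entries get names such
   as "__extendsfdf2" (SFmode to DFmode), while with WIDENING false the
   truncation entries get names such as "__truncdfsf2" (DFmode to
   SFmode); both carry the trailing operand count '2'.  */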
5217 init_one_libfunc (const char *name)
5219 rtx symbol;
5221 /* Create a FUNCTION_DECL that can be passed to
5222 targetm.encode_section_info. */
5223 /* ??? We don't have any type information except that this is
5224 a function. Pretend this is "int foo()". */
5225 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5226 build_function_type (integer_type_node, NULL_TREE));
5227 DECL_ARTIFICIAL (decl) = 1;
5228 DECL_EXTERNAL (decl) = 1;
5229 TREE_PUBLIC (decl) = 1;
5231 symbol = XEXP (DECL_RTL (decl), 0);
5233 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5234 are the flags assigned by targetm.encode_section_info. */
5235 SET_SYMBOL_REF_DECL (symbol, 0);
5237 return symbol;
5240 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5241 MODE to NAME, which should be either 0 or a string constant. */
5242 void
5243 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5245 if (name)
5246 optable->handlers[mode].libfunc = init_one_libfunc (name);
5247 else
5248 optable->handlers[mode].libfunc = 0;
5251 /* Call this to reset the function entry for one conversion optab
5252 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5253 either 0 or a string constant. */
5254 void
5255 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5256 enum machine_mode fmode, const char *name)
5258 if (name)
5259 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5260 else
5261 optable->handlers[tmode][fmode].libfunc = 0;
5264 /* Call this once to initialize the contents of the optabs
5265 appropriately for the current target machine. */
5267 void
5268 init_optabs (void)
5270 unsigned int i;
5272 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5274 for (i = 0; i < NUM_RTX_CODE; i++)
5275 setcc_gen_code[i] = CODE_FOR_nothing;
5277 #ifdef HAVE_conditional_move
5278 for (i = 0; i < NUM_MACHINE_MODES; i++)
5279 movcc_gen_code[i] = CODE_FOR_nothing;
5280 #endif
5282 for (i = 0; i < NUM_MACHINE_MODES; i++)
5284 vcond_gen_code[i] = CODE_FOR_nothing;
5285 vcondu_gen_code[i] = CODE_FOR_nothing;
5288 add_optab = init_optab (PLUS);
5289 addv_optab = init_optabv (PLUS);
5290 sub_optab = init_optab (MINUS);
5291 subv_optab = init_optabv (MINUS);
5292 smul_optab = init_optab (MULT);
5293 smulv_optab = init_optabv (MULT);
5294 smul_highpart_optab = init_optab (UNKNOWN);
5295 umul_highpart_optab = init_optab (UNKNOWN);
5296 smul_widen_optab = init_optab (UNKNOWN);
5297 umul_widen_optab = init_optab (UNKNOWN);
5298 usmul_widen_optab = init_optab (UNKNOWN);
5299 sdiv_optab = init_optab (DIV);
5300 sdivv_optab = init_optabv (DIV);
5301 sdivmod_optab = init_optab (UNKNOWN);
5302 udiv_optab = init_optab (UDIV);
5303 udivmod_optab = init_optab (UNKNOWN);
5304 smod_optab = init_optab (MOD);
5305 umod_optab = init_optab (UMOD);
5306 fmod_optab = init_optab (UNKNOWN);
5307 remainder_optab = init_optab (UNKNOWN);
5308 ftrunc_optab = init_optab (UNKNOWN);
5309 and_optab = init_optab (AND);
5310 ior_optab = init_optab (IOR);
5311 xor_optab = init_optab (XOR);
5312 ashl_optab = init_optab (ASHIFT);
5313 ashr_optab = init_optab (ASHIFTRT);
5314 lshr_optab = init_optab (LSHIFTRT);
5315 rotl_optab = init_optab (ROTATE);
5316 rotr_optab = init_optab (ROTATERT);
5317 smin_optab = init_optab (SMIN);
5318 smax_optab = init_optab (SMAX);
5319 umin_optab = init_optab (UMIN);
5320 umax_optab = init_optab (UMAX);
5321 pow_optab = init_optab (UNKNOWN);
5322 atan2_optab = init_optab (UNKNOWN);
5324 /* These three have codes assigned exclusively for the sake of
5325 have_insn_for. */
5326 mov_optab = init_optab (SET);
5327 movstrict_optab = init_optab (STRICT_LOW_PART);
5328 cmp_optab = init_optab (COMPARE);
5330 ucmp_optab = init_optab (UNKNOWN);
5331 tst_optab = init_optab (UNKNOWN);
5333 eq_optab = init_optab (EQ);
5334 ne_optab = init_optab (NE);
5335 gt_optab = init_optab (GT);
5336 ge_optab = init_optab (GE);
5337 lt_optab = init_optab (LT);
5338 le_optab = init_optab (LE);
5339 unord_optab = init_optab (UNORDERED);
5341 neg_optab = init_optab (NEG);
5342 negv_optab = init_optabv (NEG);
5343 abs_optab = init_optab (ABS);
5344 absv_optab = init_optabv (ABS);
5345 addcc_optab = init_optab (UNKNOWN);
5346 one_cmpl_optab = init_optab (NOT);
5347 bswap_optab = init_optab (BSWAP);
5348 ffs_optab = init_optab (FFS);
5349 clz_optab = init_optab (CLZ);
5350 ctz_optab = init_optab (CTZ);
5351 popcount_optab = init_optab (POPCOUNT);
5352 parity_optab = init_optab (PARITY);
5353 sqrt_optab = init_optab (SQRT);
5354 floor_optab = init_optab (UNKNOWN);
5355 ceil_optab = init_optab (UNKNOWN);
5356 round_optab = init_optab (UNKNOWN);
5357 btrunc_optab = init_optab (UNKNOWN);
5358 nearbyint_optab = init_optab (UNKNOWN);
5359 rint_optab = init_optab (UNKNOWN);
5360 sincos_optab = init_optab (UNKNOWN);
5361 sin_optab = init_optab (UNKNOWN);
5362 asin_optab = init_optab (UNKNOWN);
5363 cos_optab = init_optab (UNKNOWN);
5364 acos_optab = init_optab (UNKNOWN);
5365 exp_optab = init_optab (UNKNOWN);
5366 exp10_optab = init_optab (UNKNOWN);
5367 exp2_optab = init_optab (UNKNOWN);
5368 expm1_optab = init_optab (UNKNOWN);
5369 ldexp_optab = init_optab (UNKNOWN);
5370 logb_optab = init_optab (UNKNOWN);
5371 ilogb_optab = init_optab (UNKNOWN);
5372 log_optab = init_optab (UNKNOWN);
5373 log10_optab = init_optab (UNKNOWN);
5374 log2_optab = init_optab (UNKNOWN);
5375 log1p_optab = init_optab (UNKNOWN);
5376 tan_optab = init_optab (UNKNOWN);
5377 atan_optab = init_optab (UNKNOWN);
5378 copysign_optab = init_optab (UNKNOWN);
5380 strlen_optab = init_optab (UNKNOWN);
5381 cbranch_optab = init_optab (UNKNOWN);
5382 cmov_optab = init_optab (UNKNOWN);
5383 cstore_optab = init_optab (UNKNOWN);
5384 push_optab = init_optab (UNKNOWN);
5386 reduc_smax_optab = init_optab (UNKNOWN);
5387 reduc_umax_optab = init_optab (UNKNOWN);
5388 reduc_smin_optab = init_optab (UNKNOWN);
5389 reduc_umin_optab = init_optab (UNKNOWN);
5390 reduc_splus_optab = init_optab (UNKNOWN);
5391 reduc_uplus_optab = init_optab (UNKNOWN);
5393 ssum_widen_optab = init_optab (UNKNOWN);
5394 usum_widen_optab = init_optab (UNKNOWN);
5395 sdot_prod_optab = init_optab (UNKNOWN);
5396 udot_prod_optab = init_optab (UNKNOWN);
5398 vec_extract_optab = init_optab (UNKNOWN);
5399 vec_extract_even_optab = init_optab (UNKNOWN);
5400 vec_extract_odd_optab = init_optab (UNKNOWN);
5401 vec_interleave_high_optab = init_optab (UNKNOWN);
5402 vec_interleave_low_optab = init_optab (UNKNOWN);
5403 vec_set_optab = init_optab (UNKNOWN);
5404 vec_init_optab = init_optab (UNKNOWN);
5405 vec_shl_optab = init_optab (UNKNOWN);
5406 vec_shr_optab = init_optab (UNKNOWN);
5407 vec_realign_load_optab = init_optab (UNKNOWN);
5408 movmisalign_optab = init_optab (UNKNOWN);
5409 vec_widen_umult_hi_optab = init_optab (UNKNOWN);
5410 vec_widen_umult_lo_optab = init_optab (UNKNOWN);
5411 vec_widen_smult_hi_optab = init_optab (UNKNOWN);
5412 vec_widen_smult_lo_optab = init_optab (UNKNOWN);
5413 vec_unpacks_hi_optab = init_optab (UNKNOWN);
5414 vec_unpacks_lo_optab = init_optab (UNKNOWN);
5415 vec_unpacku_hi_optab = init_optab (UNKNOWN);
5416 vec_unpacku_lo_optab = init_optab (UNKNOWN);
5417 vec_pack_mod_optab = init_optab (UNKNOWN);
5418 vec_pack_usat_optab = init_optab (UNKNOWN);
5419 vec_pack_ssat_optab = init_optab (UNKNOWN);
5421 powi_optab = init_optab (UNKNOWN);
5423 /* Conversions. */
5424 sext_optab = init_convert_optab (SIGN_EXTEND);
5425 zext_optab = init_convert_optab (ZERO_EXTEND);
5426 trunc_optab = init_convert_optab (TRUNCATE);
5427 sfix_optab = init_convert_optab (FIX);
5428 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5429 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5430 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5431 sfloat_optab = init_convert_optab (FLOAT);
5432 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5433 lrint_optab = init_convert_optab (UNKNOWN);
5434 lround_optab = init_convert_optab (UNKNOWN);
5435 lfloor_optab = init_convert_optab (UNKNOWN);
5436 lceil_optab = init_convert_optab (UNKNOWN);
5438 for (i = 0; i < NUM_MACHINE_MODES; i++)
5440 movmem_optab[i] = CODE_FOR_nothing;
5441 cmpstr_optab[i] = CODE_FOR_nothing;
5442 cmpstrn_optab[i] = CODE_FOR_nothing;
5443 cmpmem_optab[i] = CODE_FOR_nothing;
5444 setmem_optab[i] = CODE_FOR_nothing;
5446 sync_add_optab[i] = CODE_FOR_nothing;
5447 sync_sub_optab[i] = CODE_FOR_nothing;
5448 sync_ior_optab[i] = CODE_FOR_nothing;
5449 sync_and_optab[i] = CODE_FOR_nothing;
5450 sync_xor_optab[i] = CODE_FOR_nothing;
5451 sync_nand_optab[i] = CODE_FOR_nothing;
5452 sync_old_add_optab[i] = CODE_FOR_nothing;
5453 sync_old_sub_optab[i] = CODE_FOR_nothing;
5454 sync_old_ior_optab[i] = CODE_FOR_nothing;
5455 sync_old_and_optab[i] = CODE_FOR_nothing;
5456 sync_old_xor_optab[i] = CODE_FOR_nothing;
5457 sync_old_nand_optab[i] = CODE_FOR_nothing;
5458 sync_new_add_optab[i] = CODE_FOR_nothing;
5459 sync_new_sub_optab[i] = CODE_FOR_nothing;
5460 sync_new_ior_optab[i] = CODE_FOR_nothing;
5461 sync_new_and_optab[i] = CODE_FOR_nothing;
5462 sync_new_xor_optab[i] = CODE_FOR_nothing;
5463 sync_new_nand_optab[i] = CODE_FOR_nothing;
5464 sync_compare_and_swap[i] = CODE_FOR_nothing;
5465 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5466 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5467 sync_lock_release[i] = CODE_FOR_nothing;
5469 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5472 /* Fill in the optabs with the insns we support. */
5473 init_all_optabs ();
5475 /* Initialize the optabs with the names of the library functions. */
5476 init_integral_libfuncs (add_optab, "add", '3');
5477 init_floating_libfuncs (add_optab, "add", '3');
5478 init_integral_libfuncs (addv_optab, "addv", '3');
5479 init_floating_libfuncs (addv_optab, "add", '3');
5480 init_integral_libfuncs (sub_optab, "sub", '3');
5481 init_floating_libfuncs (sub_optab, "sub", '3');
5482 init_integral_libfuncs (subv_optab, "subv", '3');
5483 init_floating_libfuncs (subv_optab, "sub", '3');
5484 init_integral_libfuncs (smul_optab, "mul", '3');
5485 init_floating_libfuncs (smul_optab, "mul", '3');
5486 init_integral_libfuncs (smulv_optab, "mulv", '3');
5487 init_floating_libfuncs (smulv_optab, "mul", '3');
5488 init_integral_libfuncs (sdiv_optab, "div", '3');
5489 init_floating_libfuncs (sdiv_optab, "div", '3');
5490 init_integral_libfuncs (sdivv_optab, "divv", '3');
5491 init_integral_libfuncs (udiv_optab, "udiv", '3');
5492 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5493 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5494 init_integral_libfuncs (smod_optab, "mod", '3');
5495 init_integral_libfuncs (umod_optab, "umod", '3');
5496 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5497 init_integral_libfuncs (and_optab, "and", '3');
5498 init_integral_libfuncs (ior_optab, "ior", '3');
5499 init_integral_libfuncs (xor_optab, "xor", '3');
5500 init_integral_libfuncs (ashl_optab, "ashl", '3');
5501 init_integral_libfuncs (ashr_optab, "ashr", '3');
5502 init_integral_libfuncs (lshr_optab, "lshr", '3');
5503 init_integral_libfuncs (smin_optab, "min", '3');
5504 init_floating_libfuncs (smin_optab, "min", '3');
5505 init_integral_libfuncs (smax_optab, "max", '3');
5506 init_floating_libfuncs (smax_optab, "max", '3');
5507 init_integral_libfuncs (umin_optab, "umin", '3');
5508 init_integral_libfuncs (umax_optab, "umax", '3');
5509 init_integral_libfuncs (neg_optab, "neg", '2');
5510 init_floating_libfuncs (neg_optab, "neg", '2');
5511 init_integral_libfuncs (negv_optab, "negv", '2');
5512 init_floating_libfuncs (negv_optab, "neg", '2');
5513 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5514 init_integral_libfuncs (ffs_optab, "ffs", '2');
5515 init_integral_libfuncs (clz_optab, "clz", '2');
5516 init_integral_libfuncs (ctz_optab, "ctz", '2');
5517 init_integral_libfuncs (popcount_optab, "popcount", '2');
5518 init_integral_libfuncs (parity_optab, "parity", '2');
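/* Editorial note, not part of the original source: the init_*_libfuncs
   helpers above build the usual libgcc names from the base name, the
   lower-case mode suffix and the operand-count digit, so the add_optab
   calls end up registering roughly the equivalent of

       set_optab_libfunc (add_optab, SImode, "__addsi3");
       set_optab_libfunc (add_optab, DImode, "__adddi3");

   with the exact set of integer modes depending on the target's word
   size.  */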
5520 /* Comparison libcalls for integers MUST come in pairs,
5521 signed/unsigned. */
5522 init_integral_libfuncs (cmp_optab, "cmp", '2');
5523 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5524 init_floating_libfuncs (cmp_optab, "cmp", '2');
5526 /* EQ etc are floating point only. */
5527 init_floating_libfuncs (eq_optab, "eq", '2');
5528 init_floating_libfuncs (ne_optab, "ne", '2');
5529 init_floating_libfuncs (gt_optab, "gt", '2');
5530 init_floating_libfuncs (ge_optab, "ge", '2');
5531 init_floating_libfuncs (lt_optab, "lt", '2');
5532 init_floating_libfuncs (le_optab, "le", '2');
5533 init_floating_libfuncs (unord_optab, "unord", '2');
5535 init_floating_libfuncs (powi_optab, "powi", '2');
5537 /* Conversions. */
5538 init_interclass_conv_libfuncs (sfloat_optab, "float",
5539 MODE_INT, MODE_FLOAT);
5540 init_interclass_conv_libfuncs (sfloat_optab, "float",
5541 MODE_INT, MODE_DECIMAL_FLOAT);
5542 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5543 MODE_INT, MODE_FLOAT);
5544 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5545 MODE_INT, MODE_DECIMAL_FLOAT);
5546 init_interclass_conv_libfuncs (sfix_optab, "fix",
5547 MODE_FLOAT, MODE_INT);
5548 init_interclass_conv_libfuncs (sfix_optab, "fix",
5549 MODE_DECIMAL_FLOAT, MODE_INT);
5550 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5551 MODE_FLOAT, MODE_INT);
5552 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5553 MODE_DECIMAL_FLOAT, MODE_INT);
5554 init_interclass_conv_libfuncs (ufloat_optab, "floatuns",
5555 MODE_INT, MODE_DECIMAL_FLOAT);
5556 init_interclass_conv_libfuncs (lrint_optab, "lrint",
5557 MODE_INT, MODE_FLOAT);
5558 init_interclass_conv_libfuncs (lround_optab, "lround",
5559 MODE_INT, MODE_FLOAT);
5560 init_interclass_conv_libfuncs (lfloor_optab, "lfloor",
5561 MODE_INT, MODE_FLOAT);
5562 init_interclass_conv_libfuncs (lceil_optab, "lceil",
5563 MODE_INT, MODE_FLOAT);
5565 /* sext_optab is also used for FLOAT_EXTEND. */
5566 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5567 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true);
5568 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5569 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5570 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5571 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false);
5572 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5573 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT);
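/* Editorial note, not part of the original source: the conversion
   helpers above follow the same naming scheme, producing for example
   "__floatsisf" for the SImode -> SFmode sfloat_optab entry,
   "__fixsfsi" for the SFmode -> SImode sfix_optab entry, and
   "__extendsfdf2" / "__truncdfsf2" for the intraclass widening and
   narrowing entries.  */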
5575 /* Explicitly initialize the bswap libfuncs since we need them to be
5576 valid for things other than word_mode. */
5577 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
5578 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
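/* Editorial sketch, not part of the original source: a generic C
   implementation matching the SImode byte-swap name registered above
   could look roughly like this (the real libgcc routine may differ):

       unsigned int
       __bswapsi2 (unsigned int u)
       {
         return ((u & 0x000000ffu) << 24)
                | ((u & 0x0000ff00u) << 8)
                | ((u & 0x00ff0000u) >> 8)
                | ((u & 0xff000000u) >> 24);
       }
*/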
5580 /* Use cabs for double complex abs, since systems generally have cabs.
5581 Don't define any libcall for float complex, so that cabs will be used. */
5582 if (complex_double_type_node)
5583 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5584 = init_one_libfunc ("cabs");
5586 /* The ffs function operates on `int'. */
5587 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5588 = init_one_libfunc ("ffs");
5590 abort_libfunc = init_one_libfunc ("abort");
5591 memcpy_libfunc = init_one_libfunc ("memcpy");
5592 memmove_libfunc = init_one_libfunc ("memmove");
5593 memcmp_libfunc = init_one_libfunc ("memcmp");
5594 memset_libfunc = init_one_libfunc ("memset");
5595 setbits_libfunc = init_one_libfunc ("__setbits");
5597 #ifndef DONT_USE_BUILTIN_SETJMP
5598 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5599 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5600 #else
5601 setjmp_libfunc = init_one_libfunc ("setjmp");
5602 longjmp_libfunc = init_one_libfunc ("longjmp");
5603 #endif
5604 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5605 unwind_sjlj_unregister_libfunc
5606 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5608 /* For function entry/exit instrumentation. */
5609 profile_function_entry_libfunc
5610 = init_one_libfunc ("__cyg_profile_func_enter");
5611 profile_function_exit_libfunc
5612 = init_one_libfunc ("__cyg_profile_func_exit");
5614 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5616 if (HAVE_conditional_trap)
5617 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5619 /* Allow the target to add more libcalls or rename some, etc. */
5620 targetm.init_libfuncs ();
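/* Editorial sketch, not part of the original source: a backend's
   TARGET_INIT_LIBFUNCS hook usually just overrides selected entries set
   up above, e.g. (with purely hypothetical routine names):

       static void
       example_init_libfuncs (void)
       {
         set_optab_libfunc (sdiv_optab, SImode, "__example_divsi3");
         set_optab_libfunc (smod_optab, SImode, "__example_modsi3");
       }
*/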
5623 #ifdef DEBUG
5625 /* Print information about the current contents of the optabs on
5626 STDERR. */
5628 static void
5629 debug_optab_libfuncs (void)
5631 int i;
5632 int j;
5633 int k;
5635 /* Dump the arithmetic optabs. */
5636 for (i = 0; i != (int) OTI_MAX; i++)
5637 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5639 optab o;
5640 struct optab_handlers *h;
5642 o = optab_table[i];
5643 h = &o->handlers[j];
5644 if (h->libfunc)
5646 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5647 fprintf (stderr, "%s\t%s:\t%s\n",
5648 GET_RTX_NAME (o->code),
5649 GET_MODE_NAME (j),
5650 XSTR (h->libfunc, 0));
5654 /* Dump the conversion optabs. */
5655 for (i = 0; i < (int) COI_MAX; ++i)
5656 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5657 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5659 convert_optab o;
5660 struct optab_handlers *h;
5662 o = &convert_optab_table[i];
5663 h = &o->handlers[j][k];
5664 if (h->libfunc)
5666 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5667 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5668 GET_RTX_NAME (o->code),
5669 GET_MODE_NAME (j),
5670 GET_MODE_NAME (k),
5671 XSTR (h->libfunc, 0));
5676 #endif /* DEBUG */
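/* Editorial note, not part of the original source: with the default
   tables a line of the dump produced by debug_optab_libfuncs looks
   roughly like

       plus    SF:     __addsf3

   i.e. the optab's rtx code, the mode, then the libfunc's SYMBOL_REF
   name.  */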
5679 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5680 CODE. Return 0 on failure. */
5682 rtx
5683 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5684 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5686 enum machine_mode mode = GET_MODE (op1);
5687 enum insn_code icode;
5688 rtx insn;
5690 if (!HAVE_conditional_trap)
5691 return 0;
5693 if (mode == VOIDmode)
5694 return 0;
5696 icode = cmp_optab->handlers[(int) mode].insn_code;
5697 if (icode == CODE_FOR_nothing)
5698 return 0;
5700 start_sequence ();
5701 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5702 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5703 if (!op1 || !op2)
5705 end_sequence ();
5706 return 0;
5708 emit_insn (GEN_FCN (icode) (op1, op2));
5710 PUT_CODE (trap_rtx, code);
5711 gcc_assert (HAVE_conditional_trap);
5712 insn = gen_conditional_trap (trap_rtx, tcode);
5713 if (insn)
5715 emit_insn (insn);
5716 insn = get_insns ();
5718 end_sequence ();
5720 return insn;
5723 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5724 or unsigned operation code. */
5726 static enum rtx_code
5727 get_rtx_code (enum tree_code tcode, bool unsignedp)
5729 enum rtx_code code;
5730 switch (tcode)
5732 case EQ_EXPR:
5733 code = EQ;
5734 break;
5735 case NE_EXPR:
5736 code = NE;
5737 break;
5738 case LT_EXPR:
5739 code = unsignedp ? LTU : LT;
5740 break;
5741 case LE_EXPR:
5742 code = unsignedp ? LEU : LE;
5743 break;
5744 case GT_EXPR:
5745 code = unsignedp ? GTU : GT;
5746 break;
5747 case GE_EXPR:
5748 code = unsignedp ? GEU : GE;
5749 break;
5751 case UNORDERED_EXPR:
5752 code = UNORDERED;
5753 break;
5754 case ORDERED_EXPR:
5755 code = ORDERED;
5756 break;
5757 case UNLT_EXPR:
5758 code = UNLT;
5759 break;
5760 case UNLE_EXPR:
5761 code = UNLE;
5762 break;
5763 case UNGT_EXPR:
5764 code = UNGT;
5765 break;
5766 case UNGE_EXPR:
5767 code = UNGE;
5768 break;
5769 case UNEQ_EXPR:
5770 code = UNEQ;
5771 break;
5772 case LTGT_EXPR:
5773 code = LTGT;
5774 break;
5776 default:
5777 gcc_unreachable ();
5779 return code;
5782 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5783 unsigned operators. Do not generate a compare instruction. */
5785 static rtx
5786 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5788 enum rtx_code rcode;
5789 tree t_op0, t_op1;
5790 rtx rtx_op0, rtx_op1;
5792 /* This is unlikely. While generating VEC_COND_EXPR, the auto-vectorizer
5793 ensures that the condition is a relational operation. */
5794 gcc_assert (COMPARISON_CLASS_P (cond));
5796 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5797 t_op0 = TREE_OPERAND (cond, 0);
5798 t_op1 = TREE_OPERAND (cond, 1);
5800 /* Expand operands. */
5801 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5802 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
5804 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5805 && GET_MODE (rtx_op0) != VOIDmode)
5806 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5808 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5809 && GET_MODE (rtx_op1) != VOIDmode)
5810 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5812 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5815 /* Return insn code for VEC_COND_EXPR EXPR. */
5817 static inline enum insn_code
5818 get_vcond_icode (tree expr, enum machine_mode mode)
5820 enum insn_code icode = CODE_FOR_nothing;
5822 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5823 icode = vcondu_gen_code[mode];
5824 else
5825 icode = vcond_gen_code[mode];
5826 return icode;
5829 /* Return TRUE iff appropriate vector insns are available
5830 for the vector cond expr EXPR in VMODE mode. */
5832 bool
5833 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5835 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5836 return false;
5837 return true;
5840 /* Generate insns for VEC_COND_EXPR. */
5842 rtx
5843 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5845 enum insn_code icode;
5846 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5847 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5848 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5850 icode = get_vcond_icode (vec_cond_expr, mode);
5851 if (icode == CODE_FOR_nothing)
5852 return 0;
5854 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5855 target = gen_reg_rtx (mode);
5857 /* Get comparison rtx. First expand both cond expr operands. */
5858 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5859 unsignedp, icode);
5860 cc_op0 = XEXP (comparison, 0);
5861 cc_op1 = XEXP (comparison, 1);
5862 /* Expand both operands and force them in reg, if required. */
5863 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5864 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5865 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5866 && mode != VOIDmode)
5867 rtx_op1 = force_reg (mode, rtx_op1);
5869 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5870 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5871 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5872 && mode != VOIDmode)
5873 rtx_op2 = force_reg (mode, rtx_op2);
5875 /* Emit instruction! */
5876 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5877 comparison, cc_op0, cc_op1));
5879 return target;
5883 /* This is an internal subroutine of the other compare_and_swap expanders.
5884 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5885 operation. TARGET is an optional place to store the value result of
5886 the operation. ICODE is the particular instruction to expand. Return
5887 the result of the operation. */
5889 static rtx
5890 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5891 rtx target, enum insn_code icode)
5893 enum machine_mode mode = GET_MODE (mem);
5894 rtx insn;
5896 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5897 target = gen_reg_rtx (mode);
5899 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5900 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5901 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5902 old_val = force_reg (mode, old_val);
5904 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5905 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5906 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5907 new_val = force_reg (mode, new_val);
5909 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5910 if (insn == NULL_RTX)
5911 return NULL_RTX;
5912 emit_insn (insn);
5914 return target;
5917 /* Expand a compare-and-swap operation and return its value. */
5919 rtx
5920 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5922 enum machine_mode mode = GET_MODE (mem);
5923 enum insn_code icode = sync_compare_and_swap[mode];
5925 if (icode == CODE_FOR_nothing)
5926 return NULL_RTX;
5928 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
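/* Editorial example, not part of the original source: assuming the usual
   builtin expansion path, the routine above is what ultimately services
   source-level code such as

       int old = __sync_val_compare_and_swap (&word, expected, desired);

   provided the target defines a sync_compare_and_swap pattern for the
   mode of WORD; otherwise NULL_RTX is returned and the caller must fall
   back to something else.  */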
5931 /* Expand a compare-and-swap operation and store true into the result if
5932 the operation was successful and false otherwise. Return the result.
5933 Unlike other routines, TARGET is not optional. */
5935 rtx
5936 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5938 enum machine_mode mode = GET_MODE (mem);
5939 enum insn_code icode;
5940 rtx subtarget, label0, label1;
5942 /* If the target supports a compare-and-swap pattern that simultaneously
5943 sets some flag for success, then use it. Otherwise use the regular
5944 compare-and-swap and follow that immediately with a compare insn. */
5945 icode = sync_compare_and_swap_cc[mode];
5946 switch (icode)
5948 default:
5949 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5950 NULL_RTX, icode);
5951 if (subtarget != NULL_RTX)
5952 break;
5954 /* FALLTHRU */
5955 case CODE_FOR_nothing:
5956 icode = sync_compare_and_swap[mode];
5957 if (icode == CODE_FOR_nothing)
5958 return NULL_RTX;
5960 /* Ensure that if OLD_VAL == MEM, we're not comparing
5961 against an old value. */
5962 if (MEM_P (old_val))
5963 old_val = force_reg (mode, old_val);
5965 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5966 NULL_RTX, icode);
5967 if (subtarget == NULL_RTX)
5968 return NULL_RTX;
5970 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5973 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5974 setcc instruction from the beginning. We don't work too hard here,
5975 but it's nice to not be stupid about initial code gen either. */
5976 if (STORE_FLAG_VALUE == 1)
5978 icode = setcc_gen_code[EQ];
5979 if (icode != CODE_FOR_nothing)
5981 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5982 rtx insn;
5984 subtarget = target;
5985 if (!insn_data[icode].operand[0].predicate (target, cmode))
5986 subtarget = gen_reg_rtx (cmode);
5988 insn = GEN_FCN (icode) (subtarget);
5989 if (insn)
5991 emit_insn (insn);
5992 if (GET_MODE (target) != GET_MODE (subtarget))
5994 convert_move (target, subtarget, 1);
5995 subtarget = target;
5997 return subtarget;
6002 /* Without an appropriate setcc instruction, use a set of branches to
6003 get 1 and 0 stored into target. Presumably if the target has a
6004 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
6006 label0 = gen_label_rtx ();
6007 label1 = gen_label_rtx ();
6009 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
6010 emit_move_insn (target, const0_rtx);
6011 emit_jump_insn (gen_jump (label1));
6012 emit_barrier ();
6013 emit_label (label0);
6014 emit_move_insn (target, const1_rtx);
6015 emit_label (label1);
6017 return target;
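/* Editorial example, not part of the original source: the boolean
   variant above corresponds to uses where only success or failure of
   the swap matters, e.g.

       if (__sync_bool_compare_and_swap (&word, expected, desired))
         take_fast_path ();

   (take_fast_path being a placeholder), with TARGET receiving the 0/1
   result.  */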
6020 /* This is a helper function for the other atomic operations. This function
6021 emits a loop that contains SEQ that iterates until a compare-and-swap
6022 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6023 a set of instructions that takes a value from OLD_REG as an input and
6024 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6025 set to the current contents of MEM. After SEQ, a compare-and-swap will
6026 attempt to update MEM with NEW_REG. The function returns true when the
6027 loop was generated successfully. */
6029 static bool
6030 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
6032 enum machine_mode mode = GET_MODE (mem);
6033 enum insn_code icode;
6034 rtx label, cmp_reg, subtarget;
6036 /* The loop we want to generate looks like
6038 cmp_reg = mem;
6039 label:
6040 old_reg = cmp_reg;
6041 seq;
6042 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
6043 if (cmp_reg != old_reg)
6044 goto label;
6046 Note that we only do the plain load from memory once. Subsequent
6047 iterations use the value loaded by the compare-and-swap pattern. */
6049 label = gen_label_rtx ();
6050 cmp_reg = gen_reg_rtx (mode);
6052 emit_move_insn (cmp_reg, mem);
6053 emit_label (label);
6054 emit_move_insn (old_reg, cmp_reg);
6055 if (seq)
6056 emit_insn (seq);
6058 /* If the target supports a compare-and-swap pattern that simultaneously
6059 sets some flag for success, then use it. Otherwise use the regular
6060 compare-and-swap and follow that immediately with a compare insn. */
6061 icode = sync_compare_and_swap_cc[mode];
6062 switch (icode)
6064 default:
6065 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6066 cmp_reg, icode);
6067 if (subtarget != NULL_RTX)
6069 gcc_assert (subtarget == cmp_reg);
6070 break;
6073 /* FALLTHRU */
6074 case CODE_FOR_nothing:
6075 icode = sync_compare_and_swap[mode];
6076 if (icode == CODE_FOR_nothing)
6077 return false;
6079 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6080 cmp_reg, icode);
6081 if (subtarget == NULL_RTX)
6082 return false;
6083 if (subtarget != cmp_reg)
6084 emit_move_insn (cmp_reg, subtarget);
6086 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
6089 /* ??? Mark this jump predicted not taken? */
6090 emit_jump_insn (bcc_gen_fctn[NE] (label));
6092 return true;
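/* Editorial sketch, not part of the original source: in C-like terms the
   loop emitted above behaves roughly as

       cmp = *mem;
       do
         {
           old = cmp;
           new = operation (old, val);      (this is the SEQ passed in)
           cmp = compare_and_swap (mem, old, new);
         }
       while (cmp != old);

   matching the pseudo code in the comment at the top of the function.  */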
6095 /* This function generates the atomic operation MEM CODE= VAL. In this
6096 case, we do not care about any resulting value. Returns NULL if we
6097 cannot generate the operation. */
6099 rtx
6100 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
6102 enum machine_mode mode = GET_MODE (mem);
6103 enum insn_code icode;
6104 rtx insn;
6106 /* Look to see if the target supports the operation directly. */
6107 switch (code)
6109 case PLUS:
6110 icode = sync_add_optab[mode];
6111 break;
6112 case IOR:
6113 icode = sync_ior_optab[mode];
6114 break;
6115 case XOR:
6116 icode = sync_xor_optab[mode];
6117 break;
6118 case AND:
6119 icode = sync_and_optab[mode];
6120 break;
6121 case NOT:
6122 icode = sync_nand_optab[mode];
6123 break;
6125 case MINUS:
6126 icode = sync_sub_optab[mode];
6127 if (icode == CODE_FOR_nothing)
6129 icode = sync_add_optab[mode];
6130 if (icode != CODE_FOR_nothing)
6132 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6133 code = PLUS;
6136 break;
6138 default:
6139 gcc_unreachable ();
6142 /* Generate the direct operation, if present. */
6143 if (icode != CODE_FOR_nothing)
6145 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6146 val = convert_modes (mode, GET_MODE (val), val, 1);
6147 if (!insn_data[icode].operand[1].predicate (val, mode))
6148 val = force_reg (mode, val);
6150 insn = GEN_FCN (icode) (mem, val);
6151 if (insn)
6153 emit_insn (insn);
6154 return const0_rtx;
6158 /* Failing that, generate a compare-and-swap loop in which we perform the
6159 operation with normal arithmetic instructions. */
6160 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6162 rtx t0 = gen_reg_rtx (mode), t1;
6164 start_sequence ();
6166 t1 = t0;
6167 if (code == NOT)
6169 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6170 code = AND;
6172 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6173 true, OPTAB_LIB_WIDEN);
6175 insn = get_insns ();
6176 end_sequence ();
6178 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6179 return const0_rtx;
6182 return NULL_RTX;
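/* Editorial example, not part of the original source: the routine above
   covers uses where the fetched value is ignored, e.g.

       __sync_fetch_and_add (&counter, 1);

   which can be emitted with a bare sync_add pattern or, failing that,
   with the compare-and-swap loop, since no result register is needed.  */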
6185 /* This function generates the atomic operation MEM CODE= VAL. In this
6186 case, we do care about the resulting value: if AFTER is true then
6187 return the value MEM holds after the operation, if AFTER is false
6188 then return the value MEM holds before the operation. TARGET is an
6189 optional place for the result value to be stored. */
6191 rtx
6192 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
6193 bool after, rtx target)
6195 enum machine_mode mode = GET_MODE (mem);
6196 enum insn_code old_code, new_code, icode;
6197 bool compensate;
6198 rtx insn;
6200 /* Look to see if the target supports the operation directly. */
6201 switch (code)
6203 case PLUS:
6204 old_code = sync_old_add_optab[mode];
6205 new_code = sync_new_add_optab[mode];
6206 break;
6207 case IOR:
6208 old_code = sync_old_ior_optab[mode];
6209 new_code = sync_new_ior_optab[mode];
6210 break;
6211 case XOR:
6212 old_code = sync_old_xor_optab[mode];
6213 new_code = sync_new_xor_optab[mode];
6214 break;
6215 case AND:
6216 old_code = sync_old_and_optab[mode];
6217 new_code = sync_new_and_optab[mode];
6218 break;
6219 case NOT:
6220 old_code = sync_old_nand_optab[mode];
6221 new_code = sync_new_nand_optab[mode];
6222 break;
6224 case MINUS:
6225 old_code = sync_old_sub_optab[mode];
6226 new_code = sync_new_sub_optab[mode];
6227 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
6229 old_code = sync_old_add_optab[mode];
6230 new_code = sync_new_add_optab[mode];
6231 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
6233 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6234 code = PLUS;
6237 break;
6239 default:
6240 gcc_unreachable ();
6243 /* If the target supports the proper new/old operation, great. But if
6244 we only support the opposite old/new operation, check to see if we
6245 can compensate. When the old value is supported, we can always
6246 perform the operation again with normal arithmetic. When only the
6247 new value is supported, we can handle this only if the operation
6248 is reversible. */
6249 compensate = false;
6250 if (after)
6252 icode = new_code;
6253 if (icode == CODE_FOR_nothing)
6255 icode = old_code;
6256 if (icode != CODE_FOR_nothing)
6257 compensate = true;
6260 else
6262 icode = old_code;
6263 if (icode == CODE_FOR_nothing
6264 && (code == PLUS || code == MINUS || code == XOR))
6266 icode = new_code;
6267 if (icode != CODE_FOR_nothing)
6268 compensate = true;
6272 /* If we found something supported, great. */
6273 if (icode != CODE_FOR_nothing)
6275 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6276 target = gen_reg_rtx (mode);
6278 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6279 val = convert_modes (mode, GET_MODE (val), val, 1);
6280 if (!insn_data[icode].operand[2].predicate (val, mode))
6281 val = force_reg (mode, val);
6283 insn = GEN_FCN (icode) (target, mem, val);
6284 if (insn)
6286 emit_insn (insn);
6288 /* If we need to compensate for using an operation with the
6289 wrong return value, do so now. */
6290 if (compensate)
6292 if (!after)
6294 if (code == PLUS)
6295 code = MINUS;
6296 else if (code == MINUS)
6297 code = PLUS;
6300 if (code == NOT)
6301 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
6302 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
6303 true, OPTAB_LIB_WIDEN);
6306 return target;
6310 /* Failing that, generate a compare-and-swap loop in which we perform the
6311 operation with normal arithmetic instructions. */
6312 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6314 rtx t0 = gen_reg_rtx (mode), t1;
6316 if (!target || !register_operand (target, mode))
6317 target = gen_reg_rtx (mode);
6319 start_sequence ();
6321 if (!after)
6322 emit_move_insn (target, t0);
6323 t1 = t0;
6324 if (code == NOT)
6326 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6327 code = AND;
6329 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6330 true, OPTAB_LIB_WIDEN);
6331 if (after)
6332 emit_move_insn (target, t1);
6334 insn = get_insns ();
6335 end_sequence ();
6337 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6338 return target;
6341 return NULL_RTX;
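/* Editorial example, not part of the original source, illustrating the
   compensation logic above: if only the "new value" add pattern exists
   and the caller wants the old value (AFTER is false), the code emits
   the new-value insn and then undoes the operation, roughly

       new = sync_new_add (mem, val);
       old = new - val;

   which is why only reversible codes (PLUS, MINUS, XOR) are accepted in
   that direction.  */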
6344 /* This function expands a test-and-set operation. Ideally we atomically
6345 store VAL in MEM and return the previous value in MEM. Some targets
6346 may not support this operation and only support VAL with the constant 1;
6347 in this case the return value will be 0/1, but the exact value
6348 stored in MEM is target defined. TARGET is an optional place to stick
6349 the return value. */
6351 rtx
6352 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6354 enum machine_mode mode = GET_MODE (mem);
6355 enum insn_code icode;
6356 rtx insn;
6358 /* If the target supports the test-and-set directly, great. */
6359 icode = sync_lock_test_and_set[mode];
6360 if (icode != CODE_FOR_nothing)
6362 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6363 target = gen_reg_rtx (mode);
6365 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6366 val = convert_modes (mode, GET_MODE (val), val, 1);
6367 if (!insn_data[icode].operand[2].predicate (val, mode))
6368 val = force_reg (mode, val);
6370 insn = GEN_FCN (icode) (target, mem, val);
6371 if (insn)
6373 emit_insn (insn);
6374 return target;
6378 /* Otherwise, use a compare-and-swap loop for the exchange. */
6379 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6381 if (!target || !register_operand (target, mode))
6382 target = gen_reg_rtx (mode);
6383 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6384 val = convert_modes (mode, GET_MODE (val), val, 1);
6385 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6386 return target;
6389 return NULL_RTX;
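/* Editorial example, not part of the original source: a typical use of
   the expander above is the acquire side of a simple spin lock,

       while (__sync_lock_test_and_set (&lock, 1))
         continue;

   spinning until the previous value was 0, which is also why targets
   are allowed to restrict VAL to the constant 1.  */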
6392 #include "gt-optabs.h"