gcc/optabs.c
1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "toplev.h"
30 /* Include insn-config.h before expr.h so that HAVE_conditional_move
31 is properly defined. */
32 #include "insn-config.h"
33 #include "rtl.h"
34 #include "tree.h"
35 #include "tm_p.h"
36 #include "flags.h"
37 #include "function.h"
38 #include "except.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "libfuncs.h"
42 #include "recog.h"
43 #include "reload.h"
44 #include "ggc.h"
45 #include "real.h"
46 #include "basic-block.h"
47 #include "target.h"
49 /* Each optab contains info on how this target machine
50 can perform a particular operation
51 for all sizes and kinds of operands.
53 The operation to be performed is often specified
54 by passing one of these optabs as an argument.
56 See expr.h for documentation of these optabs. */
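/* Illustrative note (not in the original source): the per-mode handler
   tables held in these optabs are what the expanders below index.  A
   minimal sketch of the lookup used throughout this file is

     int icode = (int) add_optab->handlers[(int) SImode].insn_code;
     if (icode != CODE_FOR_nothing)
       ... the target has a named SImode add pattern ...

   where CODE_FOR_nothing marks a missing pattern.  */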
58 optab optab_table[OTI_MAX];
60 rtx libfunc_table[LTI_MAX];
62 /* Tables of patterns for converting one mode to another. */
63 convert_optab convert_optab_table[COI_MAX];
65 /* Contains the optab used for each rtx code. */
66 optab code_to_optab[NUM_RTX_CODE + 1];
 68 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT,...)
69 gives the gen_function to make a branch to test that condition. */
71 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
 73 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT,...)
74 gives the insn code to make a store-condition insn
75 to test that condition. */
77 enum insn_code setcc_gen_code[NUM_RTX_CODE];
79 #ifdef HAVE_conditional_move
80 /* Indexed by the machine mode, gives the insn code to make a conditional
81 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
82 setcc_gen_code to cut down on the number of named patterns. Consider a day
83 when a lot more rtx codes are conditional (eg: for the ARM). */
85 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
86 #endif
88 /* Indexed by the machine mode, gives the insn code for vector conditional
89 operation. */
91 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
92 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
94 /* The insn generating function can not take an rtx_code argument.
95 TRAP_RTX is used as an rtx argument. Its code is replaced with
96 the code to be used in the trap insn and all other fields are ignored. */
97 static GTY(()) rtx trap_rtx;
99 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
100 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
101 int);
102 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
103 enum machine_mode *, int *,
104 enum can_compare_purpose);
105 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
106 int *);
107 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
108 static optab new_optab (void);
109 static convert_optab new_convert_optab (void);
110 static inline optab init_optab (enum rtx_code);
111 static inline optab init_optabv (enum rtx_code);
112 static inline convert_optab init_convert_optab (enum rtx_code);
113 static void init_libfuncs (optab, int, int, const char *, int);
114 static void init_integral_libfuncs (optab, const char *, int);
115 static void init_floating_libfuncs (optab, const char *, int);
116 static void init_interclass_conv_libfuncs (convert_optab, const char *,
117 enum mode_class, enum mode_class);
118 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
119 enum mode_class, bool);
120 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
121 enum rtx_code, int, rtx);
122 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
123 enum machine_mode *, int *);
124 static rtx widen_clz (enum machine_mode, rtx, rtx);
125 static rtx expand_parity (enum machine_mode, rtx, rtx);
126 static enum rtx_code get_rtx_code (enum tree_code, bool);
127 static rtx vector_compare_rtx (tree, bool, enum insn_code);
129 #ifndef HAVE_conditional_trap
130 #define HAVE_conditional_trap 0
131 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
132 #endif
134 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
135 the result of operation CODE applied to OP0 (and OP1 if it is a binary
136 operation).
138 If the last insn does not set TARGET, don't do anything, but return 1.
140 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
141 don't add the REG_EQUAL note but return 0. Our caller can then try
142 again, ensuring that TARGET is not one of the operands. */
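/* Illustrative example (not in the original source): if INSNS is a
   multi-insn sequence whose last insn sets TARGET to OP0 plus OP1, the
   note attached below is the rtx (plus:M OP0 OP1), letting later passes
   treat TARGET as equal to that whole expression.  */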
144 static int
145 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
147 rtx last_insn, insn, set;
148 rtx note;
150 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
152 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
153 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
154 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
155 && GET_RTX_CLASS (code) != RTX_COMPARE
156 && GET_RTX_CLASS (code) != RTX_UNARY)
157 return 1;
159 if (GET_CODE (target) == ZERO_EXTRACT)
160 return 1;
162 for (last_insn = insns;
163 NEXT_INSN (last_insn) != NULL_RTX;
164 last_insn = NEXT_INSN (last_insn))
167 set = single_set (last_insn);
168 if (set == NULL_RTX)
169 return 1;
171 if (! rtx_equal_p (SET_DEST (set), target)
172 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
173 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
174 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
175 return 1;
177 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
178 besides the last insn. */
179 if (reg_overlap_mentioned_p (target, op0)
180 || (op1 && reg_overlap_mentioned_p (target, op1)))
182 insn = PREV_INSN (last_insn);
183 while (insn != NULL_RTX)
185 if (reg_set_p (target, insn))
186 return 0;
188 insn = PREV_INSN (insn);
192 if (GET_RTX_CLASS (code) == RTX_UNARY)
193 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
194 else
195 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
197 set_unique_reg_note (last_insn, REG_EQUAL, note);
199 return 1;
202 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
203 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
204 not actually do a sign-extend or zero-extend, but can leave the
205 higher-order bits of the result rtx undefined, for example, in the case
206 of logical operations, but not right shifts. */
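/* Illustrative example (not in the original source): when widening a
   QImode register to SImode for an AND, the caller can pass a nonzero
   NO_EXTEND because only the low 8 bits of the result matter; the value
   is then returned as a paradoxical SImode SUBREG of the QImode
   register instead of going through an explicit zero- or
   sign-extension.  */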
208 static rtx
209 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
210 int unsignedp, int no_extend)
212 rtx result;
214 /* If we don't have to extend and this is a constant, return it. */
215 if (no_extend && GET_MODE (op) == VOIDmode)
216 return op;
218 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
219 extend since it will be more efficient to do so unless the signedness of
220 a promoted object differs from our extension. */
221 if (! no_extend
222 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
223 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
224 return convert_modes (mode, oldmode, op, unsignedp);
226 /* If MODE is no wider than a single word, we return a paradoxical
227 SUBREG. */
228 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
229 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
231 /* Otherwise, get an object of MODE, clobber it, and set the low-order
232 part to OP. */
234 result = gen_reg_rtx (mode);
235 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
236 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
237 return result;
240 /* Return the optab used for computing the operation given by
241 the tree code, CODE. This function is not always usable (for
242 example, it cannot give complete results for multiplication
243 or division) but probably ought to be relied on more widely
244 throughout the expander. */
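/* Illustrative example (not in the original source): for a C expression
   "x >> n" this returns lshr_optab when TYPE is unsigned and ashr_optab
   when it is signed, matching the RSHIFT_EXPR case in the switch
   below.  */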
245 optab
246 optab_for_tree_code (enum tree_code code, tree type)
248 bool trapv;
249 switch (code)
251 case BIT_AND_EXPR:
252 return and_optab;
254 case BIT_IOR_EXPR:
255 return ior_optab;
257 case BIT_NOT_EXPR:
258 return one_cmpl_optab;
260 case BIT_XOR_EXPR:
261 return xor_optab;
263 case TRUNC_MOD_EXPR:
264 case CEIL_MOD_EXPR:
265 case FLOOR_MOD_EXPR:
266 case ROUND_MOD_EXPR:
267 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
269 case RDIV_EXPR:
270 case TRUNC_DIV_EXPR:
271 case CEIL_DIV_EXPR:
272 case FLOOR_DIV_EXPR:
273 case ROUND_DIV_EXPR:
274 case EXACT_DIV_EXPR:
275 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
277 case LSHIFT_EXPR:
278 return ashl_optab;
280 case RSHIFT_EXPR:
281 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
283 case LROTATE_EXPR:
284 return rotl_optab;
286 case RROTATE_EXPR:
287 return rotr_optab;
289 case MAX_EXPR:
290 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
292 case MIN_EXPR:
293 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
295 case REALIGN_LOAD_EXPR:
296 return vec_realign_load_optab;
298 case WIDEN_SUM_EXPR:
299 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
301 case DOT_PROD_EXPR:
302 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
304 case REDUC_MAX_EXPR:
305 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
307 case REDUC_MIN_EXPR:
308 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
310 case REDUC_PLUS_EXPR:
311 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
313 case VEC_LSHIFT_EXPR:
314 return vec_shl_optab;
316 case VEC_RSHIFT_EXPR:
317 return vec_shr_optab;
319 case VEC_WIDEN_MULT_HI_EXPR:
320 return TYPE_UNSIGNED (type) ?
321 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
323 case VEC_WIDEN_MULT_LO_EXPR:
324 return TYPE_UNSIGNED (type) ?
325 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
327 case VEC_UNPACK_HI_EXPR:
328 return TYPE_UNSIGNED (type) ?
329 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
331 case VEC_UNPACK_LO_EXPR:
332 return TYPE_UNSIGNED (type) ?
333 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
335 case VEC_PACK_MOD_EXPR:
336 return vec_pack_mod_optab;
338 case VEC_PACK_SAT_EXPR:
339 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
341 default:
342 break;
345 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
346 switch (code)
348 case PLUS_EXPR:
349 return trapv ? addv_optab : add_optab;
351 case MINUS_EXPR:
352 return trapv ? subv_optab : sub_optab;
354 case MULT_EXPR:
355 return trapv ? smulv_optab : smul_optab;
357 case NEGATE_EXPR:
358 return trapv ? negv_optab : neg_optab;
360 case ABS_EXPR:
361 return trapv ? absv_optab : abs_optab;
363 case VEC_EXTRACT_EVEN_EXPR:
364 return vec_extract_even_optab;
366 case VEC_EXTRACT_ODD_EXPR:
367 return vec_extract_odd_optab;
369 case VEC_INTERLEAVE_HIGH_EXPR:
370 return vec_interleave_high_optab;
372 case VEC_INTERLEAVE_LOW_EXPR:
373 return vec_interleave_low_optab;
375 default:
376 return NULL;
381 /* Expand vector widening operations.
383 There are two different classes of operations handled here:
384 1) Operations whose result is wider than all the arguments to the operation.
385 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
386 In this case OP0 and optionally OP1 would be initialized,
387 but WIDE_OP wouldn't (not relevant for this case).
388 2) Operations whose result is of the same size as the last argument to the
389 operation, but wider than all the other arguments to the operation.
390 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
 391    In this case WIDE_OP, OP0 and optionally OP1 would all be initialized.
 393    E.g., when called to expand the following operations, this is how
394 the arguments will be initialized:
395 nops OP0 OP1 WIDE_OP
396 widening-sum 2 oprnd0 - oprnd1
397 widening-dot-product 3 oprnd0 oprnd1 oprnd2
398 widening-mult 2 oprnd0 oprnd1 -
399 type-promotion (vec-unpack) 1 oprnd0 - - */
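/* Illustrative example (not in the original source): for a widening
   dot-product of signed elements, the tree code is DOT_PROD_EXPR with
   nops == 3; OP0 and OP1 are the two narrow vector inputs, WIDE_OP is
   the wide accumulator (for instance V8HI inputs with a V4SI
   accumulator, the modes being purely illustrative), and the optab
   chosen via optab_for_tree_code is sdot_prod_optab.  */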
 401 rtx
 402 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
403 int unsignedp)
405 tree oprnd0, oprnd1, oprnd2;
406 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
407 optab widen_pattern_optab;
408 int icode;
409 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
410 rtx temp;
411 rtx pat;
412 rtx xop0, xop1, wxop;
413 int nops = TREE_CODE_LENGTH (TREE_CODE (exp));
415 oprnd0 = TREE_OPERAND (exp, 0);
416 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
417 widen_pattern_optab =
418 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
419 icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
420 gcc_assert (icode != CODE_FOR_nothing);
421 xmode0 = insn_data[icode].operand[1].mode;
423 if (nops >= 2)
425 oprnd1 = TREE_OPERAND (exp, 1);
426 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
427 xmode1 = insn_data[icode].operand[2].mode;
430 /* The last operand is of a wider mode than the rest of the operands. */
431 if (nops == 2)
433 wmode = tmode1;
434 wxmode = xmode1;
436 else if (nops == 3)
438 gcc_assert (tmode1 == tmode0);
439 gcc_assert (op1);
440 oprnd2 = TREE_OPERAND (exp, 2);
441 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
442 wxmode = insn_data[icode].operand[3].mode;
445 if (!wide_op)
446 wmode = wxmode = insn_data[icode].operand[0].mode;
448 if (!target
449 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
450 temp = gen_reg_rtx (wmode);
451 else
452 temp = target;
454 xop0 = op0;
455 xop1 = op1;
456 wxop = wide_op;
458 /* In case the insn wants input operands in modes different from
459 those of the actual operands, convert the operands. It would
460 seem that we don't need to convert CONST_INTs, but we do, so
461 that they're properly zero-extended, sign-extended or truncated
462 for their mode. */
464 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
465 xop0 = convert_modes (xmode0,
466 GET_MODE (op0) != VOIDmode
467 ? GET_MODE (op0)
468 : tmode0,
469 xop0, unsignedp);
471 if (op1)
472 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
473 xop1 = convert_modes (xmode1,
474 GET_MODE (op1) != VOIDmode
475 ? GET_MODE (op1)
476 : tmode1,
477 xop1, unsignedp);
479 if (wide_op)
480 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
481 wxop = convert_modes (wxmode,
482 GET_MODE (wide_op) != VOIDmode
483 ? GET_MODE (wide_op)
484 : wmode,
485 wxop, unsignedp);
487 /* Now, if insn's predicates don't allow our operands, put them into
488 pseudo regs. */
490 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
491 && xmode0 != VOIDmode)
492 xop0 = copy_to_mode_reg (xmode0, xop0);
494 if (op1)
496 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
497 && xmode1 != VOIDmode)
498 xop1 = copy_to_mode_reg (xmode1, xop1);
500 if (wide_op)
502 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
503 && wxmode != VOIDmode)
504 wxop = copy_to_mode_reg (wxmode, wxop);
506 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
508 else
509 pat = GEN_FCN (icode) (temp, xop0, xop1);
511 else
513 if (wide_op)
515 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
516 && wxmode != VOIDmode)
517 wxop = copy_to_mode_reg (wxmode, wxop);
519 pat = GEN_FCN (icode) (temp, xop0, wxop);
521 else
522 pat = GEN_FCN (icode) (temp, xop0);
525 emit_insn (pat);
526 return temp;
529 /* Generate code to perform an operation specified by TERNARY_OPTAB
530 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
532 UNSIGNEDP is for the case where we have to widen the operands
533 to perform the operation. It says to use zero-extension.
535 If TARGET is nonzero, the value
536 is generated there, if it is convenient to do so.
537 In all cases an rtx is returned for the locus of the value;
538 this may or may not be TARGET. */
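/* Illustrative sketch (not in the original source): a caller holding
   some ternary optab TERNOP (a placeholder name) whose SImode handler
   is known to exist might simply write

     rtx r = expand_ternary_op (SImode, TERNOP, a, b, c, NULL_RTX, 0);

   The assertion below requires the handler not to be CODE_FOR_nothing,
   so availability must be checked before calling.  */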
 540 rtx
 541 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
542 rtx op1, rtx op2, rtx target, int unsignedp)
544 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
545 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
546 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
547 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
548 rtx temp;
549 rtx pat;
550 rtx xop0 = op0, xop1 = op1, xop2 = op2;
552 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
553 != CODE_FOR_nothing);
555 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
556 temp = gen_reg_rtx (mode);
557 else
558 temp = target;
560 /* In case the insn wants input operands in modes different from
561 those of the actual operands, convert the operands. It would
562 seem that we don't need to convert CONST_INTs, but we do, so
563 that they're properly zero-extended, sign-extended or truncated
564 for their mode. */
566 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
567 xop0 = convert_modes (mode0,
568 GET_MODE (op0) != VOIDmode
569 ? GET_MODE (op0)
570 : mode,
571 xop0, unsignedp);
573 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
574 xop1 = convert_modes (mode1,
575 GET_MODE (op1) != VOIDmode
576 ? GET_MODE (op1)
577 : mode,
578 xop1, unsignedp);
580 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
581 xop2 = convert_modes (mode2,
582 GET_MODE (op2) != VOIDmode
583 ? GET_MODE (op2)
584 : mode,
585 xop2, unsignedp);
587 /* Now, if insn's predicates don't allow our operands, put them into
588 pseudo regs. */
590 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
591 && mode0 != VOIDmode)
592 xop0 = copy_to_mode_reg (mode0, xop0);
594 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
595 && mode1 != VOIDmode)
596 xop1 = copy_to_mode_reg (mode1, xop1);
598 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
599 && mode2 != VOIDmode)
600 xop2 = copy_to_mode_reg (mode2, xop2);
602 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
604 emit_insn (pat);
605 return temp;
609 /* Like expand_binop, but return a constant rtx if the result can be
610 calculated at compile time. The arguments and return value are
611 otherwise the same as for expand_binop. */
613 static rtx
614 simplify_expand_binop (enum machine_mode mode, optab binoptab,
615 rtx op0, rtx op1, rtx target, int unsignedp,
616 enum optab_methods methods)
618 if (CONSTANT_P (op0) && CONSTANT_P (op1))
620 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
622 if (x)
623 return x;
626 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
629 /* Like simplify_expand_binop, but always put the result in TARGET.
630 Return true if the expansion succeeded. */
632 bool
633 force_expand_binop (enum machine_mode mode, optab binoptab,
634 rtx op0, rtx op1, rtx target, int unsignedp,
635 enum optab_methods methods)
637 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
638 target, unsignedp, methods);
639 if (x == 0)
640 return false;
641 if (x != target)
642 emit_move_insn (target, x);
643 return true;
646 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
 648 rtx
 649 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
651 enum insn_code icode;
652 rtx rtx_op1, rtx_op2;
653 enum machine_mode mode1;
654 enum machine_mode mode2;
655 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
656 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
657 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
658 optab shift_optab;
659 rtx pat;
661 switch (TREE_CODE (vec_shift_expr))
663 case VEC_RSHIFT_EXPR:
664 shift_optab = vec_shr_optab;
665 break;
666 case VEC_LSHIFT_EXPR:
667 shift_optab = vec_shl_optab;
668 break;
669 default:
670 gcc_unreachable ();
673 icode = (int) shift_optab->handlers[(int) mode].insn_code;
674 gcc_assert (icode != CODE_FOR_nothing);
676 mode1 = insn_data[icode].operand[1].mode;
677 mode2 = insn_data[icode].operand[2].mode;
679 rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
680 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
681 && mode1 != VOIDmode)
682 rtx_op1 = force_reg (mode1, rtx_op1);
684 rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
685 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
686 && mode2 != VOIDmode)
687 rtx_op2 = force_reg (mode2, rtx_op2);
689 if (!target
690 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
691 target = gen_reg_rtx (mode);
 693   /* Emit the instruction.  */
694 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
695 gcc_assert (pat);
696 emit_insn (pat);
698 return target;
701 /* This subroutine of expand_doubleword_shift handles the cases in which
702 the effective shift value is >= BITS_PER_WORD. The arguments and return
703 value are the same as for the parent routine, except that SUPERWORD_OP1
704 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
705 INTO_TARGET may be null if the caller has decided to calculate it. */
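/* Worked example (not in the original source), assuming 32-bit words:
   a 64-bit left shift by 40 has an effective shift of 40 >= 32, so the
   caller passes SUPERWORD_OP1 = 40 - 32 = 8; INTO_TARGET then becomes
   OUTOF_INPUT << 8, while OUTOF_TARGET is filled with zeros (or, for an
   arithmetic right shift, with copies of the sign bit,
   OUTOF_INPUT >> 31).  */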
707 static bool
708 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
709 rtx outof_target, rtx into_target,
710 int unsignedp, enum optab_methods methods)
712 if (into_target != 0)
713 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
714 into_target, unsignedp, methods))
715 return false;
717 if (outof_target != 0)
719 /* For a signed right shift, we must fill OUTOF_TARGET with copies
720 of the sign bit, otherwise we must fill it with zeros. */
721 if (binoptab != ashr_optab)
722 emit_move_insn (outof_target, CONST0_RTX (word_mode));
723 else
724 if (!force_expand_binop (word_mode, binoptab,
725 outof_input, GEN_INT (BITS_PER_WORD - 1),
726 outof_target, unsignedp, methods))
727 return false;
729 return true;
732 /* This subroutine of expand_doubleword_shift handles the cases in which
733 the effective shift value is < BITS_PER_WORD. The arguments and return
734 value are the same as for the parent routine. */
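/* Worked example (not in the original source), assuming 32-bit words:
   for a 64-bit left shift by 10, OUTOF_INPUT is the low input word and
   INTO_INPUT the high one; the code below computes

     INTO_TARGET  = (INTO_INPUT << 10) | (OUTOF_INPUT >> 22);
     OUTOF_TARGET = OUTOF_INPUT << 10;

   where the 22-bit carry shift is BITS_PER_WORD - OP1 taken in the
   opposite direction to the requested shift.  */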
736 static bool
737 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
738 rtx outof_input, rtx into_input, rtx op1,
739 rtx outof_target, rtx into_target,
740 int unsignedp, enum optab_methods methods,
741 unsigned HOST_WIDE_INT shift_mask)
743 optab reverse_unsigned_shift, unsigned_shift;
744 rtx tmp, carries;
746 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
747 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
749 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
750 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
751 the opposite direction to BINOPTAB. */
752 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
754 carries = outof_input;
755 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
756 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
757 0, true, methods);
759 else
761 /* We must avoid shifting by BITS_PER_WORD bits since that is either
762 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
763 has unknown behavior. Do a single shift first, then shift by the
764 remainder. It's OK to use ~OP1 as the remainder if shift counts
765 are truncated to the mode size. */
766 carries = expand_binop (word_mode, reverse_unsigned_shift,
767 outof_input, const1_rtx, 0, unsignedp, methods);
768 if (shift_mask == BITS_PER_WORD - 1)
770 tmp = immed_double_const (-1, -1, op1_mode);
771 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
772 0, true, methods);
774 else
776 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
777 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
778 0, true, methods);
781 if (tmp == 0 || carries == 0)
782 return false;
783 carries = expand_binop (word_mode, reverse_unsigned_shift,
784 carries, tmp, 0, unsignedp, methods);
785 if (carries == 0)
786 return false;
788 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
789 so the result can go directly into INTO_TARGET if convenient. */
790 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
791 into_target, unsignedp, methods);
792 if (tmp == 0)
793 return false;
795 /* Now OR in the bits carried over from OUTOF_INPUT. */
796 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
797 into_target, unsignedp, methods))
798 return false;
800 /* Use a standard word_mode shift for the out-of half. */
801 if (outof_target != 0)
802 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
803 outof_target, unsignedp, methods))
804 return false;
806 return true;
810 #ifdef HAVE_conditional_move
811 /* Try implementing expand_doubleword_shift using conditional moves.
812 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
813 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
814 are the shift counts to use in the former and latter case. All other
815 arguments are the same as the parent routine. */
817 static bool
818 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
819 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
820 rtx outof_input, rtx into_input,
821 rtx subword_op1, rtx superword_op1,
822 rtx outof_target, rtx into_target,
823 int unsignedp, enum optab_methods methods,
824 unsigned HOST_WIDE_INT shift_mask)
826 rtx outof_superword, into_superword;
828 /* Put the superword version of the output into OUTOF_SUPERWORD and
829 INTO_SUPERWORD. */
830 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
831 if (outof_target != 0 && subword_op1 == superword_op1)
833 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
834 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
835 into_superword = outof_target;
836 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
837 outof_superword, 0, unsignedp, methods))
838 return false;
840 else
842 into_superword = gen_reg_rtx (word_mode);
843 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
844 outof_superword, into_superword,
845 unsignedp, methods))
846 return false;
849 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
850 if (!expand_subword_shift (op1_mode, binoptab,
851 outof_input, into_input, subword_op1,
852 outof_target, into_target,
853 unsignedp, methods, shift_mask))
854 return false;
856 /* Select between them. Do the INTO half first because INTO_SUPERWORD
857 might be the current value of OUTOF_TARGET. */
858 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
859 into_target, into_superword, word_mode, false))
860 return false;
862 if (outof_target != 0)
863 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
864 outof_target, outof_superword,
865 word_mode, false))
866 return false;
868 return true;
870 #endif
872 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
873 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
874 input operand; the shift moves bits in the direction OUTOF_INPUT->
875 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
876 of the target. OP1 is the shift count and OP1_MODE is its mode.
877 If OP1 is constant, it will have been truncated as appropriate
878 and is known to be nonzero.
880 If SHIFT_MASK is zero, the result of word shifts is undefined when the
881 shift count is outside the range [0, BITS_PER_WORD). This routine must
882 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
884 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
885 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
886 fill with zeros or sign bits as appropriate.
888 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
889 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
890 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
 891    In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
892 are undefined.
894 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
895 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
896 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
897 function wants to calculate it itself.
899 Return true if the shift could be successfully synthesized. */
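/* Illustrative note (not in the original source): on a target whose
   word-mode shifts truncate their count to 5 bits (SHIFT_MASK == 31
   with 32-bit words), a 64-bit shift by a non-constant OP1 lets the
   code below test OP1 & 32 == 0 to pick the subword case and reuse OP1
   itself as SUPERWORD_OP1, since the hardware masks the count
   anyway.  */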
901 static bool
902 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
903 rtx outof_input, rtx into_input, rtx op1,
904 rtx outof_target, rtx into_target,
905 int unsignedp, enum optab_methods methods,
906 unsigned HOST_WIDE_INT shift_mask)
908 rtx superword_op1, tmp, cmp1, cmp2;
909 rtx subword_label, done_label;
910 enum rtx_code cmp_code;
912 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
913 fill the result with sign or zero bits as appropriate. If so, the value
914 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
915 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
916 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
918 This isn't worthwhile for constant shifts since the optimizers will
919 cope better with in-range shift counts. */
920 if (shift_mask >= BITS_PER_WORD
921 && outof_target != 0
922 && !CONSTANT_P (op1))
924 if (!expand_doubleword_shift (op1_mode, binoptab,
925 outof_input, into_input, op1,
926 0, into_target,
927 unsignedp, methods, shift_mask))
928 return false;
929 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
930 outof_target, unsignedp, methods))
931 return false;
932 return true;
935 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
936 is true when the effective shift value is less than BITS_PER_WORD.
937 Set SUPERWORD_OP1 to the shift count that should be used to shift
938 OUTOF_INPUT into INTO_TARGET when the condition is false. */
939 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
940 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
942 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
943 is a subword shift count. */
944 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
945 0, true, methods);
946 cmp2 = CONST0_RTX (op1_mode);
947 cmp_code = EQ;
948 superword_op1 = op1;
950 else
952 /* Set CMP1 to OP1 - BITS_PER_WORD. */
953 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
954 0, true, methods);
955 cmp2 = CONST0_RTX (op1_mode);
956 cmp_code = LT;
957 superword_op1 = cmp1;
959 if (cmp1 == 0)
960 return false;
962 /* If we can compute the condition at compile time, pick the
963 appropriate subroutine. */
964 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
965 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
967 if (tmp == const0_rtx)
968 return expand_superword_shift (binoptab, outof_input, superword_op1,
969 outof_target, into_target,
970 unsignedp, methods);
971 else
972 return expand_subword_shift (op1_mode, binoptab,
973 outof_input, into_input, op1,
974 outof_target, into_target,
975 unsignedp, methods, shift_mask);
978 #ifdef HAVE_conditional_move
979 /* Try using conditional moves to generate straight-line code. */
981 rtx start = get_last_insn ();
982 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
983 cmp_code, cmp1, cmp2,
984 outof_input, into_input,
985 op1, superword_op1,
986 outof_target, into_target,
987 unsignedp, methods, shift_mask))
988 return true;
989 delete_insns_since (start);
991 #endif
993 /* As a last resort, use branches to select the correct alternative. */
994 subword_label = gen_label_rtx ();
995 done_label = gen_label_rtx ();
997 NO_DEFER_POP;
998 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
999 0, 0, subword_label);
1000 OK_DEFER_POP;
1002 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1003 outof_target, into_target,
1004 unsignedp, methods))
1005 return false;
1007 emit_jump_insn (gen_jump (done_label));
1008 emit_barrier ();
1009 emit_label (subword_label);
1011 if (!expand_subword_shift (op1_mode, binoptab,
1012 outof_input, into_input, op1,
1013 outof_target, into_target,
1014 unsignedp, methods, shift_mask))
1015 return false;
1017 emit_label (done_label);
1018 return true;
1021 /* Subroutine of expand_binop. Perform a double word multiplication of
1022 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1023    as the target's word_mode.  This function returns NULL_RTX if anything
1024 goes wrong, in which case it may have already emitted instructions
1025 which need to be deleted.
1027 If we want to multiply two two-word values and have normal and widening
1028 multiplies of single-word values, we can do this with three smaller
1029 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1030 because we are not operating on one word at a time.
1032 The multiplication proceeds as follows:
1033 _______________________
1034 [__op0_high_|__op0_low__]
1035 _______________________
1036 * [__op1_high_|__op1_low__]
1037 _______________________________________________
1038 _______________________
1039 (1) [__op0_low__*__op1_low__]
1040 _______________________
1041 (2a) [__op0_low__*__op1_high_]
1042 _______________________
1043 (2b) [__op0_high_*__op1_low__]
1044 _______________________
1045 (3) [__op0_high_*__op1_high_]
1048 This gives a 4-word result. Since we are only interested in the
1049 lower 2 words, partial result (3) and the upper words of (2a) and
1050 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1051 calculated using non-widening multiplication.
1053 (1), however, needs to be calculated with an unsigned widening
1054 multiplication. If this operation is not directly supported we
1055 try using a signed widening multiplication and adjust the result.
1056 This adjustment works as follows:
1058 If both operands are positive then no adjustment is needed.
1060 If the operands have different signs, for example op0_low < 0 and
1061 op1_low >= 0, the instruction treats the most significant bit of
1062 op0_low as a sign bit instead of a bit with significance
1063 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1064 with 2**BITS_PER_WORD - op0_low, and two's complements the
1065 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1066 the result.
1068 Similarly, if both operands are negative, we need to add
1069 (op0_low + op1_low) * 2**BITS_PER_WORD.
1071 We use a trick to adjust quickly. We logically shift op0_low right
1072 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1073 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1074 logical shift exists, we do an arithmetic right shift and subtract
1075 the 0 or -1. */
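/* Worked example (not in the original source), using 4-bit "words" for
   brevity: take op0_low = 0xC (12 unsigned, -4 signed) and
   op1_low = 0x3.  A signed widening multiply yields -4 * 3 = -12,
   i.e. 0xF4 in 8 bits, while the desired unsigned product is
   36 = 0x24.  Adding op1_low * 2**BITS_PER_WORD = 0x30 gives 0x124,
   which truncates to 0x24 in the low two words, exactly the correction
   described above; the code below folds that addition into partial
   product (2b) by adding op0_low >> (BITS_PER_WORD - 1) to op0_high
   first.  */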
1077 static rtx
1078 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1079 bool umulp, enum optab_methods methods)
1081 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1082 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1083 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1084 rtx product, adjust, product_high, temp;
1086 rtx op0_high = operand_subword_force (op0, high, mode);
1087 rtx op0_low = operand_subword_force (op0, low, mode);
1088 rtx op1_high = operand_subword_force (op1, high, mode);
1089 rtx op1_low = operand_subword_force (op1, low, mode);
1091 /* If we're using an unsigned multiply to directly compute the product
1092 of the low-order words of the operands and perform any required
1093 adjustments of the operands, we begin by trying two more multiplications
1094 and then computing the appropriate sum.
1096 We have checked above that the required addition is provided.
1097 Full-word addition will normally always succeed, especially if
1098 it is provided at all, so we don't worry about its failure. The
1099 multiplication may well fail, however, so we do handle that. */
1101 if (!umulp)
1103 /* ??? This could be done with emit_store_flag where available. */
1104 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1105 NULL_RTX, 1, methods);
1106 if (temp)
1107 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1108 NULL_RTX, 0, OPTAB_DIRECT);
1109 else
1111 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1112 NULL_RTX, 0, methods);
1113 if (!temp)
1114 return NULL_RTX;
1115 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1116 NULL_RTX, 0, OPTAB_DIRECT);
1119 if (!op0_high)
1120 return NULL_RTX;
1123 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1124 NULL_RTX, 0, OPTAB_DIRECT);
1125 if (!adjust)
1126 return NULL_RTX;
1128 /* OP0_HIGH should now be dead. */
1130 if (!umulp)
1132 /* ??? This could be done with emit_store_flag where available. */
1133 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1134 NULL_RTX, 1, methods);
1135 if (temp)
1136 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1137 NULL_RTX, 0, OPTAB_DIRECT);
1138 else
1140 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1141 NULL_RTX, 0, methods);
1142 if (!temp)
1143 return NULL_RTX;
1144 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1145 NULL_RTX, 0, OPTAB_DIRECT);
1148 if (!op1_high)
1149 return NULL_RTX;
1152 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1153 NULL_RTX, 0, OPTAB_DIRECT);
1154 if (!temp)
1155 return NULL_RTX;
1157 /* OP1_HIGH should now be dead. */
1159 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1160 adjust, 0, OPTAB_DIRECT);
1162 if (target && !REG_P (target))
1163 target = NULL_RTX;
1165 if (umulp)
1166 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1167 target, 1, OPTAB_DIRECT);
1168 else
1169 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1170 target, 1, OPTAB_DIRECT);
1172 if (!product)
1173 return NULL_RTX;
1175 product_high = operand_subword (product, high, 1, mode);
1176 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1177 REG_P (product_high) ? product_high : adjust,
1178 0, OPTAB_DIRECT);
1179 emit_move_insn (product_high, adjust);
1180 return product;
1183 /* Wrapper around expand_binop which takes an rtx code to specify
1184 the operation to perform, not an optab pointer. All other
1185 arguments are the same. */
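/* Illustrative sketch (not in the original source): a caller can expand
   a plain SImode addition without naming an optab directly, e.g.

     rtx sum = expand_simple_binop (SImode, PLUS, x, y, NULL_RTX,
                                    0, OPTAB_LIB_WIDEN);

   the rtx code PLUS is mapped to add_optab through code_to_optab.  */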
1186 rtx
1187 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1188 rtx op1, rtx target, int unsignedp,
1189 enum optab_methods methods)
1191 optab binop = code_to_optab[(int) code];
1192 gcc_assert (binop);
1194 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1197 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1198 binop. Order them according to commutative_operand_precedence and, if
1199 possible, try to put TARGET or a pseudo first. */
1200 static bool
1201 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1203 int op0_prec = commutative_operand_precedence (op0);
1204 int op1_prec = commutative_operand_precedence (op1);
1206 if (op0_prec < op1_prec)
1207 return true;
1209 if (op0_prec > op1_prec)
1210 return false;
1212 /* With equal precedence, both orders are ok, but it is better if the
1213 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1214 if (target == 0 || REG_P (target))
1215 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1216 else
1217 return rtx_equal_p (op1, target);
1221 /* Generate code to perform an operation specified by BINOPTAB
1222 on operands OP0 and OP1, with result having machine-mode MODE.
1224 UNSIGNEDP is for the case where we have to widen the operands
1225 to perform the operation. It says to use zero-extension.
1227 If TARGET is nonzero, the value
1228 is generated there, if it is convenient to do so.
1229 In all cases an rtx is returned for the locus of the value;
1230 this may or may not be TARGET. */
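/* Illustrative sketch (not in the original source): a typical call is

     rtx res = expand_binop (SImode, xor_optab, op0, op1, target,
                             1, OPTAB_LIB_WIDEN);

   TARGET may be zero, and the caller must always use the returned rtx,
   which may or may not be TARGET, as stated above.  */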
1232 rtx
1233 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1234 rtx target, int unsignedp, enum optab_methods methods)
1236 enum optab_methods next_methods
1237 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1238 ? OPTAB_WIDEN : methods);
1239 enum mode_class class;
1240 enum machine_mode wider_mode;
1241 rtx temp;
1242 int commutative_op = 0;
1243 int shift_op = (binoptab->code == ASHIFT
1244 || binoptab->code == ASHIFTRT
1245 || binoptab->code == LSHIFTRT
1246 || binoptab->code == ROTATE
1247 || binoptab->code == ROTATERT);
1248 rtx entry_last = get_last_insn ();
1249 rtx last;
1250 bool first_pass_p = true;
1252 class = GET_MODE_CLASS (mode);
1254 /* If subtracting an integer constant, convert this into an addition of
1255 the negated constant. */
1257 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1259 op1 = negate_rtx (mode, op1);
1260 binoptab = add_optab;
1263 /* If we are inside an appropriately-short loop and we are optimizing,
1264 force expensive constants into a register. */
1265 if (CONSTANT_P (op0) && optimize
1266 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1268 if (GET_MODE (op0) != VOIDmode)
1269 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1270 op0 = force_reg (mode, op0);
1273 if (CONSTANT_P (op1) && optimize
1274 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1276 if (GET_MODE (op1) != VOIDmode)
1277 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1278 op1 = force_reg (mode, op1);
1281 /* Record where to delete back to if we backtrack. */
1282 last = get_last_insn ();
1284 /* If operation is commutative,
1285 try to make the first operand a register.
1286 Even better, try to make it the same as the target.
1287 Also try to make the last operand a constant. */
1288 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1289 || binoptab == smul_widen_optab
1290 || binoptab == umul_widen_optab
1291 || binoptab == smul_highpart_optab
1292 || binoptab == umul_highpart_optab)
1294 commutative_op = 1;
1296 if (swap_commutative_operands_with_target (target, op0, op1))
1298 temp = op1;
1299 op1 = op0;
1300 op0 = temp;
1304 retry:
1306 /* If we can do it with a three-operand insn, do so. */
1308 if (methods != OPTAB_MUST_WIDEN
1309 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1311 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1312 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1313 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1314 enum machine_mode tmp_mode;
1315 rtx pat;
1316 rtx xop0 = op0, xop1 = op1;
1318 if (target)
1319 temp = target;
1320 else
1321 temp = gen_reg_rtx (mode);
1323 /* If it is a commutative operator and the modes would match
1324 if we would swap the operands, we can save the conversions. */
1325 if (commutative_op)
1327 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1328 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1330 rtx tmp;
1332 tmp = op0; op0 = op1; op1 = tmp;
1333 tmp = xop0; xop0 = xop1; xop1 = tmp;
1337 /* In case the insn wants input operands in modes different from
1338 those of the actual operands, convert the operands. It would
1339 seem that we don't need to convert CONST_INTs, but we do, so
1340 that they're properly zero-extended, sign-extended or truncated
1341 for their mode. */
1343 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1344 xop0 = convert_modes (mode0,
1345 GET_MODE (op0) != VOIDmode
1346 ? GET_MODE (op0)
1347 : mode,
1348 xop0, unsignedp);
1350 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1351 xop1 = convert_modes (mode1,
1352 GET_MODE (op1) != VOIDmode
1353 ? GET_MODE (op1)
1354 : mode,
1355 xop1, unsignedp);
1357 /* Now, if insn's predicates don't allow our operands, put them into
1358 pseudo regs. */
1360 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1361 && mode0 != VOIDmode)
1362 xop0 = copy_to_mode_reg (mode0, xop0);
1364 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1365 && mode1 != VOIDmode)
1366 xop1 = copy_to_mode_reg (mode1, xop1);
1368 if (binoptab == vec_pack_mod_optab
1369 || binoptab == vec_pack_usat_optab
1370 || binoptab == vec_pack_ssat_optab)
1372      /* The mode of the result is different from the mode of the
1373 arguments. */
1374 tmp_mode = insn_data[icode].operand[0].mode;
1375 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1376 return 0;
1378 else
1379 tmp_mode = mode;
1381 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1382 temp = gen_reg_rtx (tmp_mode);
1384 pat = GEN_FCN (icode) (temp, xop0, xop1);
1385 if (pat)
1387 /* If PAT is composed of more than one insn, try to add an appropriate
1388 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1389 operand, call ourselves again, this time without a target. */
1390 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1391 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1393 delete_insns_since (last);
1394 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1395 unsignedp, methods);
1398 emit_insn (pat);
1399 return temp;
1401 else
1402 delete_insns_since (last);
1405 /* If we were trying to rotate by a constant value, and that didn't
1406 work, try rotating the other direction before falling back to
1407 shifts and bitwise-or. */
1408 if (first_pass_p
1409 && (binoptab == rotl_optab || binoptab == rotr_optab)
1410 && class == MODE_INT
1411 && GET_CODE (op1) == CONST_INT
1412 && INTVAL (op1) > 0
1413 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1415 first_pass_p = false;
1416 op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
1417 binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
1418 goto retry;
1421 /* If this is a multiply, see if we can do a widening operation that
1422 takes operands of this mode and makes a wider mode. */
1424 if (binoptab == smul_optab
1425 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1426 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1427 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1428 != CODE_FOR_nothing))
1430 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1431 unsignedp ? umul_widen_optab : smul_widen_optab,
1432 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1434 if (temp != 0)
1436 if (GET_MODE_CLASS (mode) == MODE_INT
1437 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1438 GET_MODE_BITSIZE (GET_MODE (temp))))
1439 return gen_lowpart (mode, temp);
1440 else
1441 return convert_to_mode (mode, temp, unsignedp);
1445 /* Look for a wider mode of the same class for which we think we
1446 can open-code the operation. Check for a widening multiply at the
1447 wider mode as well. */
1449 if (CLASS_HAS_WIDER_MODES_P (class)
1450 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1451 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1452 wider_mode != VOIDmode;
1453 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1455 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1456 || (binoptab == smul_optab
1457 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1458 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1459 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1460 != CODE_FOR_nothing)))
1462 rtx xop0 = op0, xop1 = op1;
1463 int no_extend = 0;
1465 /* For certain integer operations, we need not actually extend
1466 the narrow operands, as long as we will truncate
1467 the results to the same narrowness. */
1469 if ((binoptab == ior_optab || binoptab == and_optab
1470 || binoptab == xor_optab
1471 || binoptab == add_optab || binoptab == sub_optab
1472 || binoptab == smul_optab || binoptab == ashl_optab)
1473 && class == MODE_INT)
1474 no_extend = 1;
1476 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1478 /* The second operand of a shift must always be extended. */
1479 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1480 no_extend && binoptab != ashl_optab);
1482 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1483 unsignedp, OPTAB_DIRECT);
1484 if (temp)
1486 if (class != MODE_INT
1487 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1488 GET_MODE_BITSIZE (wider_mode)))
1490 if (target == 0)
1491 target = gen_reg_rtx (mode);
1492 convert_move (target, temp, 0);
1493 return target;
1495 else
1496 return gen_lowpart (mode, temp);
1498 else
1499 delete_insns_since (last);
1503 /* These can be done a word at a time. */
1504 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1505 && class == MODE_INT
1506 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1507 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1509 int i;
1510 rtx insns;
1511 rtx equiv_value;
1513 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1514 won't be accurate, so use a new target. */
1515 if (target == 0 || target == op0 || target == op1)
1516 target = gen_reg_rtx (mode);
1518 start_sequence ();
1520 /* Do the actual arithmetic. */
1521 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1523 rtx target_piece = operand_subword (target, i, 1, mode);
1524 rtx x = expand_binop (word_mode, binoptab,
1525 operand_subword_force (op0, i, mode),
1526 operand_subword_force (op1, i, mode),
1527 target_piece, unsignedp, next_methods);
1529 if (x == 0)
1530 break;
1532 if (target_piece != x)
1533 emit_move_insn (target_piece, x);
1536 insns = get_insns ();
1537 end_sequence ();
1539 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1541 if (binoptab->code != UNKNOWN)
1542 equiv_value
1543 = gen_rtx_fmt_ee (binoptab->code, mode,
1544 copy_rtx (op0), copy_rtx (op1));
1545 else
1546 equiv_value = 0;
1548 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1549 return target;
1553 /* Synthesize double word shifts from single word shifts. */
1554 if ((binoptab == lshr_optab || binoptab == ashl_optab
1555 || binoptab == ashr_optab)
1556 && class == MODE_INT
1557 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1558 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1559 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1560 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1561 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1563 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1564 enum machine_mode op1_mode;
1566 double_shift_mask = targetm.shift_truncation_mask (mode);
1567 shift_mask = targetm.shift_truncation_mask (word_mode);
1568 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1570 /* Apply the truncation to constant shifts. */
1571 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1572 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1574 if (op1 == CONST0_RTX (op1_mode))
1575 return op0;
1577 /* Make sure that this is a combination that expand_doubleword_shift
1578 can handle. See the comments there for details. */
1579 if (double_shift_mask == 0
1580 || (shift_mask == BITS_PER_WORD - 1
1581 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1583 rtx insns, equiv_value;
1584 rtx into_target, outof_target;
1585 rtx into_input, outof_input;
1586 int left_shift, outof_word;
1588 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1589 won't be accurate, so use a new target. */
1590 if (target == 0 || target == op0 || target == op1)
1591 target = gen_reg_rtx (mode);
1593 start_sequence ();
1595 /* OUTOF_* is the word we are shifting bits away from, and
1596 INTO_* is the word that we are shifting bits towards, thus
1597 they differ depending on the direction of the shift and
1598 WORDS_BIG_ENDIAN. */
1600 left_shift = binoptab == ashl_optab;
1601 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1603 outof_target = operand_subword (target, outof_word, 1, mode);
1604 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1606 outof_input = operand_subword_force (op0, outof_word, mode);
1607 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1609 if (expand_doubleword_shift (op1_mode, binoptab,
1610 outof_input, into_input, op1,
1611 outof_target, into_target,
1612 unsignedp, next_methods, shift_mask))
1614 insns = get_insns ();
1615 end_sequence ();
1617 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1618 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1619 return target;
1621 end_sequence ();
1625 /* Synthesize double word rotates from single word shifts. */
1626 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1627 && class == MODE_INT
1628 && GET_CODE (op1) == CONST_INT
1629 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1630 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1631 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1633 rtx insns;
1634 rtx into_target, outof_target;
1635 rtx into_input, outof_input;
1636 rtx inter;
1637 int shift_count, left_shift, outof_word;
1639 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1640 won't be accurate, so use a new target. Do this also if target is not
1641 a REG, first because having a register instead may open optimization
1642 opportunities, and second because if target and op0 happen to be MEMs
1643 designating the same location, we would risk clobbering it too early
1644 in the code sequence we generate below. */
1645 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1646 target = gen_reg_rtx (mode);
1648 start_sequence ();
1650 shift_count = INTVAL (op1);
1652 /* OUTOF_* is the word we are shifting bits away from, and
1653 INTO_* is the word that we are shifting bits towards, thus
1654 they differ depending on the direction of the shift and
1655 WORDS_BIG_ENDIAN. */
1657 left_shift = (binoptab == rotl_optab);
1658 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1660 outof_target = operand_subword (target, outof_word, 1, mode);
1661 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1663 outof_input = operand_subword_force (op0, outof_word, mode);
1664 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1666 if (shift_count == BITS_PER_WORD)
1668 /* This is just a word swap. */
1669 emit_move_insn (outof_target, into_input);
1670 emit_move_insn (into_target, outof_input);
1671 inter = const0_rtx;
1673 else
1675 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1676 rtx first_shift_count, second_shift_count;
1677 optab reverse_unsigned_shift, unsigned_shift;
1679 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1680 ? lshr_optab : ashl_optab);
1682 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1683 ? ashl_optab : lshr_optab);
1685 if (shift_count > BITS_PER_WORD)
1687 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1688 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1690 else
1692 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1693 second_shift_count = GEN_INT (shift_count);
1696 into_temp1 = expand_binop (word_mode, unsigned_shift,
1697 outof_input, first_shift_count,
1698 NULL_RTX, unsignedp, next_methods);
1699 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1700 into_input, second_shift_count,
1701 NULL_RTX, unsignedp, next_methods);
1703 if (into_temp1 != 0 && into_temp2 != 0)
1704 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1705 into_target, unsignedp, next_methods);
1706 else
1707 inter = 0;
1709 if (inter != 0 && inter != into_target)
1710 emit_move_insn (into_target, inter);
1712 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1713 into_input, first_shift_count,
1714 NULL_RTX, unsignedp, next_methods);
1715 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1716 outof_input, second_shift_count,
1717 NULL_RTX, unsignedp, next_methods);
1719 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1720 inter = expand_binop (word_mode, ior_optab,
1721 outof_temp1, outof_temp2,
1722 outof_target, unsignedp, next_methods);
1724 if (inter != 0 && inter != outof_target)
1725 emit_move_insn (outof_target, inter);
1728 insns = get_insns ();
1729 end_sequence ();
1731 if (inter != 0)
1733 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1734 block to help the register allocator a bit. But a multi-word
1735 rotate will need all the input bits when setting the output
1736 bits, so there clearly is a conflict between the input and
1737 output registers. So we can't use a no-conflict block here. */
1738 emit_insn (insns);
1739 return target;
1743 /* These can be done a word at a time by propagating carries. */
1744 if ((binoptab == add_optab || binoptab == sub_optab)
1745 && class == MODE_INT
1746 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1747 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1749 unsigned int i;
1750 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1751 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1752 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1753 rtx xop0, xop1, xtarget;
1755 /* We can handle either a 1 or -1 value for the carry. If
1756 STORE_FLAG_VALUE is one of those, use it. Otherwise, use 1 since it is
1757 the one easiest to get. */
1758 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1759 int normalizep = STORE_FLAG_VALUE;
1760 #else
1761 int normalizep = 1;
1762 #endif
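/* When the flag value is -1, a set carry is represented as -1 rather
   than 1, which is why the code below folds the carry in with the
   opposite operation: adding a carry of 1 becomes subtracting -1, and
   subtracting a borrow of 1 becomes adding -1.  */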
1764 /* Prepare the operands. */
1765 xop0 = force_reg (mode, op0);
1766 xop1 = force_reg (mode, op1);
1768 xtarget = gen_reg_rtx (mode);
1770 if (target == 0 || !REG_P (target))
1771 target = xtarget;
1773 /* Indicate for flow that the entire target reg is being set. */
1774 if (REG_P (target))
1775 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1777 /* Do the actual arithmetic. */
1778 for (i = 0; i < nwords; i++)
1780 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1781 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1782 rtx op0_piece = operand_subword_force (xop0, index, mode);
1783 rtx op1_piece = operand_subword_force (xop1, index, mode);
1784 rtx x;
1786 /* Main add/subtract of the input operands. */
1787 x = expand_binop (word_mode, binoptab,
1788 op0_piece, op1_piece,
1789 target_piece, unsignedp, next_methods);
1790 if (x == 0)
1791 break;
1793 if (i + 1 < nwords)
1795 /* Store carry from main add/subtract. */
1796 carry_out = gen_reg_rtx (word_mode);
1797 carry_out = emit_store_flag_force (carry_out,
1798 (binoptab == add_optab
1799 ? LT : GT),
1800 x, op0_piece,
1801 word_mode, 1, normalizep);
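/* The unsigned comparison detects wraparound: for addition the word
   result X is (unsigned) less than OP0_PIECE exactly when a carry out
   of this word occurred, and for subtraction X is greater than
   OP0_PIECE exactly when a borrow occurred.  */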
1804 if (i > 0)
1806 rtx newx;
1808 /* Add/subtract previous carry to main result. */
1809 newx = expand_binop (word_mode,
1810 normalizep == 1 ? binoptab : otheroptab,
1811 x, carry_in,
1812 NULL_RTX, 1, next_methods);
1814 if (i + 1 < nwords)
1816 /* Get out carry from adding/subtracting carry in. */
1817 rtx carry_tmp = gen_reg_rtx (word_mode);
1818 carry_tmp = emit_store_flag_force (carry_tmp,
1819 (binoptab == add_optab
1820 ? LT : GT),
1821 newx, x,
1822 word_mode, 1, normalizep);
1824 /* Logically IOR the two possible carries together. */
1825 carry_out = expand_binop (word_mode, ior_optab,
1826 carry_out, carry_tmp,
1827 carry_out, 0, next_methods);
1828 if (carry_out == 0)
1829 break;
1831 emit_move_insn (target_piece, newx);
1833 else
1835 if (x != target_piece)
1836 emit_move_insn (target_piece, x);
1839 carry_in = carry_out;
1842 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1844 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1845 || ! rtx_equal_p (target, xtarget))
1847 rtx temp = emit_move_insn (target, xtarget);
1849 set_unique_reg_note (temp,
1850 REG_EQUAL,
1851 gen_rtx_fmt_ee (binoptab->code, mode,
1852 copy_rtx (xop0),
1853 copy_rtx (xop1)));
1855 else
1856 target = xtarget;
1858 return target;
1861 else
1862 delete_insns_since (last);
1865 /* Attempt to synthesize double word multiplies using a sequence of word
1866 mode multiplications. We first attempt to generate a sequence using a
1867 more efficient unsigned widening multiply, and if that fails we then
1868 try using a signed widening multiply. */
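/* The arithmetic behind this is the usual schoolbook decomposition:
   writing each operand as hi * B + lo with B = 2**BITS_PER_WORD, the
   low-order double word of the product is

     lo0 * lo1  +  B * (hi0 * lo1 + lo0 * hi1)      (mod B*B)

   so one widening word multiply plus two plain word multiplies and two
   word additions suffice.  */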
1870 if (binoptab == smul_optab
1871 && class == MODE_INT
1872 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1873 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1874 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1876 rtx product = NULL_RTX;
1878 if (umul_widen_optab->handlers[(int) mode].insn_code
1879 != CODE_FOR_nothing)
1881 product = expand_doubleword_mult (mode, op0, op1, target,
1882 true, methods);
1883 if (!product)
1884 delete_insns_since (last);
1887 if (product == NULL_RTX
1888 && smul_widen_optab->handlers[(int) mode].insn_code
1889 != CODE_FOR_nothing)
1891 product = expand_doubleword_mult (mode, op0, op1, target,
1892 false, methods);
1893 if (!product)
1894 delete_insns_since (last);
1897 if (product != NULL_RTX)
1899 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1901 temp = emit_move_insn (target ? target : product, product);
1902 set_unique_reg_note (temp,
1903 REG_EQUAL,
1904 gen_rtx_fmt_ee (MULT, mode,
1905 copy_rtx (op0),
1906 copy_rtx (op1)));
1908 return product;
1912 /* It can't be open-coded in this mode.
1913 Use a library call if one is available and caller says that's ok. */
1915 if (binoptab->handlers[(int) mode].libfunc
1916 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1918 rtx insns;
1919 rtx op1x = op1;
1920 enum machine_mode op1_mode = mode;
1921 rtx value;
1923 start_sequence ();
1925 if (shift_op)
1927 op1_mode = word_mode;
1928 /* Specify unsigned here,
1929 since negative shift counts are meaningless. */
1930 op1x = convert_to_mode (word_mode, op1, 1);
1933 if (GET_MODE (op0) != VOIDmode
1934 && GET_MODE (op0) != mode)
1935 op0 = convert_to_mode (mode, op0, unsignedp);
1937 /* Pass 1 for NO_QUEUE so we don't lose any increments
1938 if the libcall is cse'd or moved. */
1939 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1940 NULL_RTX, LCT_CONST, mode, 2,
1941 op0, mode, op1x, op1_mode);
1943 insns = get_insns ();
1944 end_sequence ();
1946 target = gen_reg_rtx (mode);
1947 emit_libcall_block (insns, target, value,
1948 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1950 return target;
1953 delete_insns_since (last);
1955 /* It can't be done in this mode. Can we do it in a wider mode? */
1957 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1958 || methods == OPTAB_MUST_WIDEN))
1960 /* Caller says, don't even try. */
1961 delete_insns_since (entry_last);
1962 return 0;
1965 /* Compute the value of METHODS to pass to recursive calls.
1966 Don't allow widening to be tried recursively. */
1968 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1970 /* Look for a wider mode of the same class for which it appears we can do
1971 the operation. */
1973 if (CLASS_HAS_WIDER_MODES_P (class))
1975 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1976 wider_mode != VOIDmode;
1977 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1979 if ((binoptab->handlers[(int) wider_mode].insn_code
1980 != CODE_FOR_nothing)
1981 || (methods == OPTAB_LIB
1982 && binoptab->handlers[(int) wider_mode].libfunc))
1984 rtx xop0 = op0, xop1 = op1;
1985 int no_extend = 0;
1987 /* For certain integer operations, we need not actually extend
1988 the narrow operands, as long as we will truncate
1989 the results to the same narrowness. */
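/* This works because, for the operations listed below, every bit of
   the result depends only on bits of equal or lower significance in
   the inputs; whatever garbage ends up in the high part of the widened
   operands therefore cannot affect the low part that is kept.  */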
1991 if ((binoptab == ior_optab || binoptab == and_optab
1992 || binoptab == xor_optab
1993 || binoptab == add_optab || binoptab == sub_optab
1994 || binoptab == smul_optab || binoptab == ashl_optab)
1995 && class == MODE_INT)
1996 no_extend = 1;
1998 xop0 = widen_operand (xop0, wider_mode, mode,
1999 unsignedp, no_extend);
2001 /* The second operand of a shift must always be extended. */
2002 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2003 no_extend && binoptab != ashl_optab);
2005 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2006 unsignedp, methods);
2007 if (temp)
2009 if (class != MODE_INT
2010 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2011 GET_MODE_BITSIZE (wider_mode)))
2013 if (target == 0)
2014 target = gen_reg_rtx (mode);
2015 convert_move (target, temp, 0);
2016 return target;
2018 else
2019 return gen_lowpart (mode, temp);
2021 else
2022 delete_insns_since (last);
2027 delete_insns_since (entry_last);
2028 return 0;
2031 /* Expand a binary operator which has both signed and unsigned forms.
2032 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2033 signed operations.
2035 If we widen unsigned operands, we may use a signed wider operation instead
2036 of an unsigned wider operation, since the result would be the same. */
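/* For instance, an unsigned QImode multiplication can be done as a
   signed HImode multiplication: once the operands have been
   zero-extended, the wider multiply yields the same low-order 8 bits
   whether it is regarded as signed or unsigned, and only those bits
   survive the truncation back to QImode.  */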
2039 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2040 rtx op0, rtx op1, rtx target, int unsignedp,
2041 enum optab_methods methods)
2043 rtx temp;
2044 optab direct_optab = unsignedp ? uoptab : soptab;
2045 struct optab wide_soptab;
2047 /* Do it without widening, if possible. */
2048 temp = expand_binop (mode, direct_optab, op0, op1, target,
2049 unsignedp, OPTAB_DIRECT);
2050 if (temp || methods == OPTAB_DIRECT)
2051 return temp;
2053 /* Try widening to a signed int. Make a fake signed optab that
2054 hides any signed insn for direct use. */
2055 wide_soptab = *soptab;
2056 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2057 wide_soptab.handlers[(int) mode].libfunc = 0;
2059 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2060 unsignedp, OPTAB_WIDEN);
2062 /* For unsigned operands, try widening to an unsigned int. */
2063 if (temp == 0 && unsignedp)
2064 temp = expand_binop (mode, uoptab, op0, op1, target,
2065 unsignedp, OPTAB_WIDEN);
2066 if (temp || methods == OPTAB_WIDEN)
2067 return temp;
2069 /* Use the right width lib call if that exists. */
2070 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2071 if (temp || methods == OPTAB_LIB)
2072 return temp;
2074 /* Must widen and use a lib call; use either signed or unsigned. */
2075 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2076 unsignedp, methods);
2077 if (temp != 0)
2078 return temp;
2079 if (unsignedp)
2080 return expand_binop (mode, uoptab, op0, op1, target,
2081 unsignedp, methods);
2082 return 0;
2085 /* Generate code to perform an operation specified by UNOPPTAB
2086 on operand OP0, with two results to TARG0 and TARG1.
2087 We assume that the order of the operands for the instruction
2088 is TARG0, TARG1, OP0.
2090 Either TARG0 or TARG1 may be zero, but what that means is that
2091 the result is not actually wanted. We will generate it into
2092 a dummy pseudo-reg and discard it. They may not both be zero.
2094 Returns 1 if this operation can be performed; 0 if not. */
2097 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2098 int unsignedp)
2100 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2101 enum mode_class class;
2102 enum machine_mode wider_mode;
2103 rtx entry_last = get_last_insn ();
2104 rtx last;
2106 class = GET_MODE_CLASS (mode);
2108 if (!targ0)
2109 targ0 = gen_reg_rtx (mode);
2110 if (!targ1)
2111 targ1 = gen_reg_rtx (mode);
2113 /* Record where to go back to if we fail. */
2114 last = get_last_insn ();
2116 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2118 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2119 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2120 rtx pat;
2121 rtx xop0 = op0;
2123 if (GET_MODE (xop0) != VOIDmode
2124 && GET_MODE (xop0) != mode0)
2125 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2127 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2128 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2129 xop0 = copy_to_mode_reg (mode0, xop0);
2131 /* We could handle this, but we should always be called with a pseudo
2132 for our targets and all insns should take them as outputs. */
2133 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2134 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2136 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2137 if (pat)
2139 emit_insn (pat);
2140 return 1;
2142 else
2143 delete_insns_since (last);
2146 /* It can't be done in this mode. Can we do it in a wider mode? */
2148 if (CLASS_HAS_WIDER_MODES_P (class))
2150 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2151 wider_mode != VOIDmode;
2152 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2154 if (unoptab->handlers[(int) wider_mode].insn_code
2155 != CODE_FOR_nothing)
2157 rtx t0 = gen_reg_rtx (wider_mode);
2158 rtx t1 = gen_reg_rtx (wider_mode);
2159 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2161 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2163 convert_move (targ0, t0, unsignedp);
2164 convert_move (targ1, t1, unsignedp);
2165 return 1;
2167 else
2168 delete_insns_since (last);
2173 delete_insns_since (entry_last);
2174 return 0;
2177 /* Generate code to perform an operation specified by BINOPTAB
2178 on operands OP0 and OP1, with two results to TARG0 and TARG1.
2179 We assume that the order of the operands for the instruction
2180 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2181 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2183 Either TARG0 or TARG1 may be zero, but what that means is that
2184 the result is not actually wanted. We will generate it into
2185 a dummy pseudo-reg and discard it. They may not both be zero.
2187 Returns 1 if this operation can be performed; 0 if not. */
2190 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2191 int unsignedp)
2193 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2194 enum mode_class class;
2195 enum machine_mode wider_mode;
2196 rtx entry_last = get_last_insn ();
2197 rtx last;
2199 class = GET_MODE_CLASS (mode);
2201 /* If we are inside an appropriately-short loop and we are optimizing,
2202 force expensive constants into a register. */
2203 if (CONSTANT_P (op0) && optimize
2204 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2205 op0 = force_reg (mode, op0);
2207 if (CONSTANT_P (op1) && optimize
2208 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2209 op1 = force_reg (mode, op1);
2211 if (!targ0)
2212 targ0 = gen_reg_rtx (mode);
2213 if (!targ1)
2214 targ1 = gen_reg_rtx (mode);
2216 /* Record where to go back to if we fail. */
2217 last = get_last_insn ();
2219 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2221 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2222 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2223 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2224 rtx pat;
2225 rtx xop0 = op0, xop1 = op1;
2227 /* In case the insn wants input operands in modes different from
2228 those of the actual operands, convert the operands. It would
2229 seem that we don't need to convert CONST_INTs, but we do, so
2230 that they're properly zero-extended, sign-extended or truncated
2231 for their mode. */
2233 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2234 xop0 = convert_modes (mode0,
2235 GET_MODE (op0) != VOIDmode
2236 ? GET_MODE (op0)
2237 : mode,
2238 xop0, unsignedp);
2240 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2241 xop1 = convert_modes (mode1,
2242 GET_MODE (op1) != VOIDmode
2243 ? GET_MODE (op1)
2244 : mode,
2245 xop1, unsignedp);
2247 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2248 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2249 xop0 = copy_to_mode_reg (mode0, xop0);
2251 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2252 xop1 = copy_to_mode_reg (mode1, xop1);
2254 /* We could handle this, but we should always be called with a pseudo
2255 for our targets and all insns should take them as outputs. */
2256 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2257 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2259 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2260 if (pat)
2262 emit_insn (pat);
2263 return 1;
2265 else
2266 delete_insns_since (last);
2269 /* It can't be done in this mode. Can we do it in a wider mode? */
2271 if (CLASS_HAS_WIDER_MODES_P (class))
2273 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2274 wider_mode != VOIDmode;
2275 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2277 if (binoptab->handlers[(int) wider_mode].insn_code
2278 != CODE_FOR_nothing)
2280 rtx t0 = gen_reg_rtx (wider_mode);
2281 rtx t1 = gen_reg_rtx (wider_mode);
2282 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2283 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2285 if (expand_twoval_binop (binoptab, cop0, cop1,
2286 t0, t1, unsignedp))
2288 convert_move (targ0, t0, unsignedp);
2289 convert_move (targ1, t1, unsignedp);
2290 return 1;
2292 else
2293 delete_insns_since (last);
2298 delete_insns_since (entry_last);
2299 return 0;
2302 /* Expand the two-valued library call indicated by BINOPTAB, but
2303 preserve only one of the values. If TARG0 is non-NULL, the first
2304 value is placed into TARG0; otherwise the second value is placed
2305 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2306 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2307 This routine assumes that the value returned by the library call is
2308 as if the return value was of an integral mode twice as wide as the
2309 mode of OP0. Returns 1 if the call was successful. */
2311 bool
2312 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2313 rtx targ0, rtx targ1, enum rtx_code code)
2315 enum machine_mode mode;
2316 enum machine_mode libval_mode;
2317 rtx libval;
2318 rtx insns;
2320 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2321 gcc_assert (!targ0 != !targ1);
2323 mode = GET_MODE (op0);
2324 if (!binoptab->handlers[(int) mode].libfunc)
2325 return false;
2327 /* The value returned by the library function will have twice as
2328 many bits as the nominal MODE. */
2329 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2330 MODE_INT);
2331 start_sequence ();
2332 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2333 NULL_RTX, LCT_CONST,
2334 libval_mode, 2,
2335 op0, mode,
2336 op1, mode);
2337 /* Get the part of VAL containing the value that we want. */
2338 libval = simplify_gen_subreg (mode, libval, libval_mode,
2339 targ0 ? 0 : GET_MODE_SIZE (mode));
2340 insns = get_insns ();
2341 end_sequence ();
2342 /* Move the result into the desired location. */
2343 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2344 gen_rtx_fmt_ee (code, mode, op0, op1));
2346 return true;
2350 /* Wrapper around expand_unop which takes an rtx code to specify
2351 the operation to perform, not an optab pointer. All other
2352 arguments are the same. */
2354 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2355 rtx target, int unsignedp)
2357 optab unop = code_to_optab[(int) code];
2358 gcc_assert (unop);
2360 return expand_unop (mode, unop, op0, target, unsignedp);
2363 /* Try calculating
2364 (clz:narrow x)
2365 as
2366 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
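/* Zero extension prepends (width wide) - (width narrow) zero bits, all
   of which the wide CLZ counts, so that constant is subtracted back
   out; e.g. an 8-bit CLZ computed via a 32-bit CLZ subtracts 24 from
   the 32-bit count.  */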
2367 static rtx
2368 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2370 enum mode_class class = GET_MODE_CLASS (mode);
2371 if (CLASS_HAS_WIDER_MODES_P (class))
2373 enum machine_mode wider_mode;
2374 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2375 wider_mode != VOIDmode;
2376 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2378 if (clz_optab->handlers[(int) wider_mode].insn_code
2379 != CODE_FOR_nothing)
2381 rtx xop0, temp, last;
2383 last = get_last_insn ();
2385 if (target == 0)
2386 target = gen_reg_rtx (mode);
2387 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2388 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2389 if (temp != 0)
2390 temp = expand_binop (wider_mode, sub_optab, temp,
2391 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2392 - GET_MODE_BITSIZE (mode)),
2393 target, true, OPTAB_DIRECT);
2394 if (temp == 0)
2395 delete_insns_since (last);
2397 return temp;
2401 return 0;
2404 /* Try calculating (parity x) as (and (popcount x) 1), where
2405 popcount can also be done in a wider mode. */
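/* The parity of X is just the low bit of its population count, and
   zero extension adds no set bits, so the popcount may equally well be
   taken in a wider mode before masking with 1.  */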
2406 static rtx
2407 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2409 enum mode_class class = GET_MODE_CLASS (mode);
2410 if (CLASS_HAS_WIDER_MODES_P (class))
2412 enum machine_mode wider_mode;
2413 for (wider_mode = mode; wider_mode != VOIDmode;
2414 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2416 if (popcount_optab->handlers[(int) wider_mode].insn_code
2417 != CODE_FOR_nothing)
2419 rtx xop0, temp, last;
2421 last = get_last_insn ();
2423 if (target == 0)
2424 target = gen_reg_rtx (mode);
2425 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2426 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2427 true);
2428 if (temp != 0)
2429 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2430 target, true, OPTAB_DIRECT);
2431 if (temp == 0)
2432 delete_insns_since (last);
2434 return temp;
2438 return 0;
2441 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2442 conditions, VAL may already be a SUBREG against which we cannot generate
2443 a further SUBREG. In this case, we expect forcing the value into a
2444 register will work around the situation. */
2446 static rtx
2447 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2448 enum machine_mode imode)
2450 rtx ret;
2451 ret = lowpart_subreg (omode, val, imode);
2452 if (ret == NULL)
2454 val = force_reg (imode, val);
2455 ret = lowpart_subreg (omode, val, imode);
2456 gcc_assert (ret != NULL);
2458 return ret;
2461 /* Expand a floating point absolute value or negation operation via a
2462 logical operation on the sign bit. */
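/* For example, for IEEE single precision (SFmode) the sign is bit 31,
   so ABS becomes X & ~(1 << 31) and NEG becomes X ^ (1 << 31) on the
   integer image of the value.  */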
2464 static rtx
2465 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2466 rtx op0, rtx target)
2468 const struct real_format *fmt;
2469 int bitpos, word, nwords, i;
2470 enum machine_mode imode;
2471 HOST_WIDE_INT hi, lo;
2472 rtx temp, insns;
2474 /* The format has to have a simple sign bit. */
2475 fmt = REAL_MODE_FORMAT (mode);
2476 if (fmt == NULL)
2477 return NULL_RTX;
2479 bitpos = fmt->signbit_rw;
2480 if (bitpos < 0)
2481 return NULL_RTX;
2483 /* Don't create negative zeros if the format doesn't support them. */
2484 if (code == NEG && !fmt->has_signed_zero)
2485 return NULL_RTX;
2487 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2489 imode = int_mode_for_mode (mode);
2490 if (imode == BLKmode)
2491 return NULL_RTX;
2492 word = 0;
2493 nwords = 1;
2495 else
2497 imode = word_mode;
2499 if (FLOAT_WORDS_BIG_ENDIAN)
2500 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2501 else
2502 word = bitpos / BITS_PER_WORD;
2503 bitpos = bitpos % BITS_PER_WORD;
2504 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2507 if (bitpos < HOST_BITS_PER_WIDE_INT)
2509 hi = 0;
2510 lo = (HOST_WIDE_INT) 1 << bitpos;
2512 else
2514 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2515 lo = 0;
2517 if (code == ABS)
2518 lo = ~lo, hi = ~hi;
2520 if (target == 0 || target == op0)
2521 target = gen_reg_rtx (mode);
2523 if (nwords > 1)
2525 start_sequence ();
2527 for (i = 0; i < nwords; ++i)
2529 rtx targ_piece = operand_subword (target, i, 1, mode);
2530 rtx op0_piece = operand_subword_force (op0, i, mode);
2532 if (i == word)
2534 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2535 op0_piece,
2536 immed_double_const (lo, hi, imode),
2537 targ_piece, 1, OPTAB_LIB_WIDEN);
2538 if (temp != targ_piece)
2539 emit_move_insn (targ_piece, temp);
2541 else
2542 emit_move_insn (targ_piece, op0_piece);
2545 insns = get_insns ();
2546 end_sequence ();
2548 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2549 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2551 else
2553 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2554 gen_lowpart (imode, op0),
2555 immed_double_const (lo, hi, imode),
2556 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2557 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2559 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2560 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2563 return target;
2566 /* Generate code to perform an operation specified by UNOPTAB
2567 on operand OP0, with result having machine-mode MODE.
2569 UNSIGNEDP is for the case where we have to widen the operands
2570 to perform the operation. It says to use zero-extension.
2572 If TARGET is nonzero, the value
2573 is generated there, if it is convenient to do so.
2574 In all cases an rtx is returned for the locus of the value;
2575 this may or may not be TARGET. */
2578 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2579 int unsignedp)
2581 enum mode_class class;
2582 enum machine_mode wider_mode;
2583 rtx temp;
2584 rtx last = get_last_insn ();
2585 rtx pat;
2587 class = GET_MODE_CLASS (mode);
2589 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2591 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2592 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2593 rtx xop0 = op0;
2595 if (target)
2596 temp = target;
2597 else
2598 temp = gen_reg_rtx (mode);
2600 if (GET_MODE (xop0) != VOIDmode
2601 && GET_MODE (xop0) != mode0)
2602 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2604 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2606 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2607 xop0 = copy_to_mode_reg (mode0, xop0);
2609 if (!insn_data[icode].operand[0].predicate (temp, mode))
2610 temp = gen_reg_rtx (mode);
2612 pat = GEN_FCN (icode) (temp, xop0);
2613 if (pat)
2615 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2616 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2618 delete_insns_since (last);
2619 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2622 emit_insn (pat);
2624 return temp;
2626 else
2627 delete_insns_since (last);
2630 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2632 /* Widening clz needs special treatment. */
2633 if (unoptab == clz_optab)
2635 temp = widen_clz (mode, op0, target);
2636 if (temp)
2637 return temp;
2638 else
2639 goto try_libcall;
2642 /* We can't widen a bswap. */
2643 if (unoptab == bswap_optab)
2644 goto try_libcall;
2646 if (CLASS_HAS_WIDER_MODES_P (class))
2647 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2648 wider_mode != VOIDmode;
2649 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2651 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2653 rtx xop0 = op0;
2655 /* For certain operations, we need not actually extend
2656 the narrow operand, as long as we will truncate the
2657 results to the same narrowness. */
2659 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2660 (unoptab == neg_optab
2661 || unoptab == one_cmpl_optab)
2662 && class == MODE_INT);
2664 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2665 unsignedp);
2667 if (temp)
2669 if (class != MODE_INT
2670 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2671 GET_MODE_BITSIZE (wider_mode)))
2673 if (target == 0)
2674 target = gen_reg_rtx (mode);
2675 convert_move (target, temp, 0);
2676 return target;
2678 else
2679 return gen_lowpart (mode, temp);
2681 else
2682 delete_insns_since (last);
2686 /* These can be done a word at a time. */
2687 if (unoptab == one_cmpl_optab
2688 && class == MODE_INT
2689 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2690 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2692 int i;
2693 rtx insns;
2695 if (target == 0 || target == op0)
2696 target = gen_reg_rtx (mode);
2698 start_sequence ();
2700 /* Do the actual arithmetic. */
2701 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2703 rtx target_piece = operand_subword (target, i, 1, mode);
2704 rtx x = expand_unop (word_mode, unoptab,
2705 operand_subword_force (op0, i, mode),
2706 target_piece, unsignedp);
2708 if (target_piece != x)
2709 emit_move_insn (target_piece, x);
2712 insns = get_insns ();
2713 end_sequence ();
2715 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2716 gen_rtx_fmt_e (unoptab->code, mode,
2717 copy_rtx (op0)));
2718 return target;
2721 if (unoptab->code == NEG)
2723 /* Try negating floating point values by flipping the sign bit. */
2724 if (SCALAR_FLOAT_MODE_P (mode))
2726 temp = expand_absneg_bit (NEG, mode, op0, target);
2727 if (temp)
2728 return temp;
2731 /* If there is no negation pattern, and we have no negative zero,
2732 try subtracting from zero. */
2733 if (!HONOR_SIGNED_ZEROS (mode))
2735 temp = expand_binop (mode, (unoptab == negv_optab
2736 ? subv_optab : sub_optab),
2737 CONST0_RTX (mode), op0, target,
2738 unsignedp, OPTAB_DIRECT);
2739 if (temp)
2740 return temp;
2744 /* Try calculating parity (x) as popcount (x) % 2. */
2745 if (unoptab == parity_optab)
2747 temp = expand_parity (mode, op0, target);
2748 if (temp)
2749 return temp;
2752 try_libcall:
2753 /* Now try a library call in this mode. */
2754 if (unoptab->handlers[(int) mode].libfunc)
2756 rtx insns;
2757 rtx value;
2758 enum machine_mode outmode = mode;
2760 /* All of these functions return small values. Thus we choose to
2761 have them return something that isn't a double-word. */
2762 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2763 || unoptab == popcount_optab || unoptab == parity_optab)
2764 outmode
2765 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2767 start_sequence ();
2769 /* Pass 1 for NO_QUEUE so we don't lose any increments
2770 if the libcall is cse'd or moved. */
2771 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2772 NULL_RTX, LCT_CONST, outmode,
2773 1, op0, mode);
2774 insns = get_insns ();
2775 end_sequence ();
2777 target = gen_reg_rtx (outmode);
2778 emit_libcall_block (insns, target, value,
2779 gen_rtx_fmt_e (unoptab->code, outmode, op0));
2781 return target;
2784 /* It can't be done in this mode. Can we do it in a wider mode? */
2786 if (CLASS_HAS_WIDER_MODES_P (class))
2788 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2789 wider_mode != VOIDmode;
2790 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2792 if ((unoptab->handlers[(int) wider_mode].insn_code
2793 != CODE_FOR_nothing)
2794 || unoptab->handlers[(int) wider_mode].libfunc)
2796 rtx xop0 = op0;
2798 /* For certain operations, we need not actually extend
2799 the narrow operand, as long as we will truncate the
2800 results to the same narrowness. */
2802 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2803 (unoptab == neg_optab
2804 || unoptab == one_cmpl_optab)
2805 && class == MODE_INT);
2807 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2808 unsignedp);
2810 /* If we are generating clz using a wider mode, adjust the
2811 result. */
2812 if (unoptab == clz_optab && temp != 0)
2813 temp = expand_binop (wider_mode, sub_optab, temp,
2814 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2815 - GET_MODE_BITSIZE (mode)),
2816 target, true, OPTAB_DIRECT);
2818 if (temp)
2820 if (class != MODE_INT)
2822 if (target == 0)
2823 target = gen_reg_rtx (mode);
2824 convert_move (target, temp, 0);
2825 return target;
2827 else
2828 return gen_lowpart (mode, temp);
2830 else
2831 delete_insns_since (last);
2836 /* One final attempt at implementing negation via subtraction,
2837 this time allowing widening of the operand. */
2838 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2840 rtx temp;
2841 temp = expand_binop (mode,
2842 unoptab == negv_optab ? subv_optab : sub_optab,
2843 CONST0_RTX (mode), op0,
2844 target, unsignedp, OPTAB_LIB_WIDEN);
2845 if (temp)
2846 return temp;
2849 return 0;
2852 /* Emit code to compute the absolute value of OP0, with result to
2853 TARGET if convenient. (TARGET may be 0.) The return value says
2854 where the result actually is to be found.
2856 MODE is the mode of the operand; the mode of the result is
2857 different but can be deduced from MODE. */
2862 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2863 int result_unsignedp)
2865 rtx temp;
2867 if (! flag_trapv)
2868 result_unsignedp = 1;
2870 /* First try to do it with a special abs instruction. */
2871 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2872 op0, target, 0);
2873 if (temp != 0)
2874 return temp;
2876 /* For floating point modes, try clearing the sign bit. */
2877 if (SCALAR_FLOAT_MODE_P (mode))
2879 temp = expand_absneg_bit (ABS, mode, op0, target);
2880 if (temp)
2881 return temp;
2884 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2885 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2886 && !HONOR_SIGNED_ZEROS (mode))
2888 rtx last = get_last_insn ();
2890 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2891 if (temp != 0)
2892 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2893 OPTAB_WIDEN);
2895 if (temp != 0)
2896 return temp;
2898 delete_insns_since (last);
2901 /* If this machine has expensive jumps, we can do integer absolute
2902 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2903 where W is the width of MODE. */
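/* For example, with W == 32 and X == -5: X >> 31 is -1 (all ones),
   -1 ^ -5 is 4, and 4 - (-1) is 5.  For nonnegative X the shift yields
   0 and X passes through unchanged.  */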
2905 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2907 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2908 size_int (GET_MODE_BITSIZE (mode) - 1),
2909 NULL_RTX, 0);
2911 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2912 OPTAB_LIB_WIDEN);
2913 if (temp != 0)
2914 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2915 temp, extended, target, 0, OPTAB_LIB_WIDEN);
2917 if (temp != 0)
2918 return temp;
2921 return NULL_RTX;
2925 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2926 int result_unsignedp, int safe)
2928 rtx temp, op1;
2930 if (! flag_trapv)
2931 result_unsignedp = 1;
2933 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2934 if (temp != 0)
2935 return temp;
2937 /* If that does not win, use conditional jump and negate. */
2939 /* It is safe to use the target if it is the same
2940 as the source, provided it is also a pseudo register. */
2941 if (op0 == target && REG_P (op0)
2942 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
2943 safe = 1;
2945 op1 = gen_label_rtx ();
2946 if (target == 0 || ! safe
2947 || GET_MODE (target) != mode
2948 || (MEM_P (target) && MEM_VOLATILE_P (target))
2949 || (REG_P (target)
2950 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2951 target = gen_reg_rtx (mode);
2953 emit_move_insn (target, op0);
2954 NO_DEFER_POP;
2956 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2957 NULL_RTX, NULL_RTX, op1);
2959 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2960 target, target, 0);
2961 if (op0 != target)
2962 emit_move_insn (target, op0);
2963 emit_label (op1);
2964 OK_DEFER_POP;
2965 return target;
2968 /* A subroutine of expand_copysign, perform the copysign operation using the
2969 abs and neg primitives advertised to exist on the target. The assumption
2970 is that we have a split register file, and leaving op0 in fp registers,
2971 and not playing with subregs so much, will help the register allocator. */
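/* In other words, the sequence generated below behaves like

     t = op0_is_abs ? op0 : fabs (op0);
     if (signbit (op1))
       t = -t;

   with the sign-bit test done as an integer AND against a mask built
   from the format's sign-bit position.  */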
2973 static rtx
2974 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2975 int bitpos, bool op0_is_abs)
2977 enum machine_mode imode;
2978 HOST_WIDE_INT hi, lo;
2979 int word;
2980 rtx label;
2982 if (target == op1)
2983 target = NULL_RTX;
2985 if (!op0_is_abs)
2987 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2988 if (op0 == NULL)
2989 return NULL_RTX;
2990 target = op0;
2992 else
2994 if (target == NULL_RTX)
2995 target = copy_to_reg (op0);
2996 else
2997 emit_move_insn (target, op0);
3000 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3002 imode = int_mode_for_mode (mode);
3003 if (imode == BLKmode)
3004 return NULL_RTX;
3005 op1 = gen_lowpart (imode, op1);
3007 else
3009 imode = word_mode;
3010 if (FLOAT_WORDS_BIG_ENDIAN)
3011 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3012 else
3013 word = bitpos / BITS_PER_WORD;
3014 bitpos = bitpos % BITS_PER_WORD;
3015 op1 = operand_subword_force (op1, word, mode);
3018 if (bitpos < HOST_BITS_PER_WIDE_INT)
3020 hi = 0;
3021 lo = (HOST_WIDE_INT) 1 << bitpos;
3023 else
3025 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3026 lo = 0;
3029 op1 = expand_binop (imode, and_optab, op1,
3030 immed_double_const (lo, hi, imode),
3031 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3033 label = gen_label_rtx ();
3034 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3036 if (GET_CODE (op0) == CONST_DOUBLE)
3037 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3038 else
3039 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3040 if (op0 != target)
3041 emit_move_insn (target, op0);
3043 emit_label (label);
3045 return target;
3049 /* A subroutine of expand_copysign, perform the entire copysign operation
3050 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3051 is true if op0 is known to have its sign bit clear. */
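/* That is, with MASK having only the sign bit set, the result is

     (op0 & ~MASK) | (op1 & MASK)

   computed word by word when the mode is wider than a word; the
   op0 & ~MASK step is skipped when OP0_IS_ABS says the sign bit is
   already clear.  */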
3053 static rtx
3054 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3055 int bitpos, bool op0_is_abs)
3057 enum machine_mode imode;
3058 HOST_WIDE_INT hi, lo;
3059 int word, nwords, i;
3060 rtx temp, insns;
3062 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3064 imode = int_mode_for_mode (mode);
3065 if (imode == BLKmode)
3066 return NULL_RTX;
3067 word = 0;
3068 nwords = 1;
3070 else
3072 imode = word_mode;
3074 if (FLOAT_WORDS_BIG_ENDIAN)
3075 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3076 else
3077 word = bitpos / BITS_PER_WORD;
3078 bitpos = bitpos % BITS_PER_WORD;
3079 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3082 if (bitpos < HOST_BITS_PER_WIDE_INT)
3084 hi = 0;
3085 lo = (HOST_WIDE_INT) 1 << bitpos;
3087 else
3089 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3090 lo = 0;
3093 if (target == 0 || target == op0 || target == op1)
3094 target = gen_reg_rtx (mode);
3096 if (nwords > 1)
3098 start_sequence ();
3100 for (i = 0; i < nwords; ++i)
3102 rtx targ_piece = operand_subword (target, i, 1, mode);
3103 rtx op0_piece = operand_subword_force (op0, i, mode);
3105 if (i == word)
3107 if (!op0_is_abs)
3108 op0_piece = expand_binop (imode, and_optab, op0_piece,
3109 immed_double_const (~lo, ~hi, imode),
3110 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3112 op1 = expand_binop (imode, and_optab,
3113 operand_subword_force (op1, i, mode),
3114 immed_double_const (lo, hi, imode),
3115 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3117 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3118 targ_piece, 1, OPTAB_LIB_WIDEN);
3119 if (temp != targ_piece)
3120 emit_move_insn (targ_piece, temp);
3122 else
3123 emit_move_insn (targ_piece, op0_piece);
3126 insns = get_insns ();
3127 end_sequence ();
3129 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
3131 else
3133 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3134 immed_double_const (lo, hi, imode),
3135 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3137 op0 = gen_lowpart (imode, op0);
3138 if (!op0_is_abs)
3139 op0 = expand_binop (imode, and_optab, op0,
3140 immed_double_const (~lo, ~hi, imode),
3141 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3143 temp = expand_binop (imode, ior_optab, op0, op1,
3144 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3145 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3148 return target;
3151 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3152 scalar floating point mode. Return NULL if we do not know how to
3153 expand the operation inline. */
3156 expand_copysign (rtx op0, rtx op1, rtx target)
3158 enum machine_mode mode = GET_MODE (op0);
3159 const struct real_format *fmt;
3160 bool op0_is_abs;
3161 rtx temp;
3163 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3164 gcc_assert (GET_MODE (op1) == mode);
3166 /* First try to do it with a special instruction. */
3167 temp = expand_binop (mode, copysign_optab, op0, op1,
3168 target, 0, OPTAB_DIRECT);
3169 if (temp)
3170 return temp;
3172 fmt = REAL_MODE_FORMAT (mode);
3173 if (fmt == NULL || !fmt->has_signed_zero)
3174 return NULL_RTX;
3176 op0_is_abs = false;
3177 if (GET_CODE (op0) == CONST_DOUBLE)
3179 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3180 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3181 op0_is_abs = true;
3184 if (fmt->signbit_ro >= 0
3185 && (GET_CODE (op0) == CONST_DOUBLE
3186 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
3187 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
3189 temp = expand_copysign_absneg (mode, op0, op1, target,
3190 fmt->signbit_ro, op0_is_abs);
3191 if (temp)
3192 return temp;
3195 if (fmt->signbit_rw < 0)
3196 return NULL_RTX;
3197 return expand_copysign_bit (mode, op0, op1, target,
3198 fmt->signbit_rw, op0_is_abs);
3201 /* Generate an instruction whose insn-code is INSN_CODE,
3202 with two operands: an output TARGET and an input OP0.
3203 TARGET *must* be nonzero, and the output is always stored there.
3204 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3205 the value that is stored into TARGET. */
3207 void
3208 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3210 rtx temp;
3211 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3212 rtx pat;
3214 temp = target;
3216 /* Now, if insn does not accept our operands, put them into pseudos. */
3218 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3219 op0 = copy_to_mode_reg (mode0, op0);
3221 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3222 temp = gen_reg_rtx (GET_MODE (temp));
3224 pat = GEN_FCN (icode) (temp, op0);
3226 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3227 add_equal_note (pat, temp, code, op0, NULL_RTX);
3229 emit_insn (pat);
3231 if (temp != target)
3232 emit_move_insn (target, temp);
3235 struct no_conflict_data
3237 rtx target, first, insn;
3238 bool must_stay;
3241 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3242 Set P->must_stay if the currently examined clobber / store has to stay
3243 in the list of insns that constitute the actual no_conflict block /
3244 libcall block. */
3245 static void
3246 no_conflict_move_test (rtx dest, rtx set, void *p0)
3248 struct no_conflict_data *p = p0;
3250 /* If this insn directly contributes to setting the target, it must stay. */
3251 if (reg_overlap_mentioned_p (p->target, dest))
3252 p->must_stay = true;
3253 /* If we haven't committed to keeping any other insns in the list yet,
3254 there is nothing more to check. */
3255 else if (p->insn == p->first)
3256 return;
3257 /* If this insn sets / clobbers a register that feeds one of the insns
3258 already in the list, this insn has to stay too. */
3259 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3260 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3261 || reg_used_between_p (dest, p->first, p->insn)
3262 /* Likewise if this insn depends on a register set by a previous
3263 insn in the list, or if it sets a result (presumably a hard
3264 register) that is set or clobbered by a previous insn.
3265 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3266 SET_DEST perform the former check on the address, and the latter
3267 check on the MEM. */
3268 || (GET_CODE (set) == SET
3269 && (modified_in_p (SET_SRC (set), p->first)
3270 || modified_in_p (SET_DEST (set), p->first)
3271 || modified_between_p (SET_SRC (set), p->first, p->insn)
3272 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3273 p->must_stay = true;
3276 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3277 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3278 is possible to do so. */
3280 static void
3281 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
3283 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3285 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3286 encapsulated region would not be in one basic block, i.e. when
3287 there is a control_flow_insn_p insn between FIRST and LAST. */
3288 bool attach_libcall_retval_notes = true;
3289 rtx insn, next = NEXT_INSN (last);
3291 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3292 if (control_flow_insn_p (insn))
3294 attach_libcall_retval_notes = false;
3295 break;
3298 if (attach_libcall_retval_notes)
3300 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3301 REG_NOTES (first));
3302 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3303 REG_NOTES (last));
3308 /* Emit code to perform a series of operations on a multi-word quantity, one
3309 word at a time.
3311 Such a block is preceded by a CLOBBER of the output, consists of multiple
3312 insns, each setting one word of the output, and followed by a SET copying
3313 the output to itself.
3315 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3316 note indicating that it doesn't conflict with the (also multi-word)
3317 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3318 notes.
3320 INSNS is a block of code generated to perform the operation, not including
3321 the CLOBBER and final copy. All insns that compute intermediate values
3322 are first emitted, followed by the block as described above.
3324 TARGET, OP0, and OP1 are the output and inputs of the operations,
3325 respectively. OP1 may be zero for a unary operation.
3327 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3328 on the last insn.
3330 If TARGET is not a register, INSNS is simply emitted with no special
3331 processing. Likewise if anything in INSNS is not an INSN or if
3332 there is a libcall block inside INSNS.
3334 The final insn emitted is returned. */
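/* Schematically, for a two-word TARGET the emitted block normally
   looks like

     (clobber TARGET)
     (set (word 0 of TARGET) ...)   ;; REG_NO_CONFLICT notes for inputs
     (set (word 1 of TARGET) ...)   ;; REG_NO_CONFLICT notes for inputs
     (set TARGET TARGET)            ;; REG_EQUAL note of EQUIV

   bracketed by REG_LIBCALL on the first insn and REG_RETVAL on the
   last.  */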
3337 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3339 rtx prev, next, first, last, insn;
3341 if (!REG_P (target) || reload_in_progress)
3342 return emit_insn (insns);
3343 else
3344 for (insn = insns; insn; insn = NEXT_INSN (insn))
3345 if (!NONJUMP_INSN_P (insn)
3346 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3347 return emit_insn (insns);
3349 /* First emit all insns that do not store into words of the output and remove
3350 these from the list. */
3351 for (insn = insns; insn; insn = next)
3353 rtx note;
3354 struct no_conflict_data data;
3356 next = NEXT_INSN (insn);
3358 /* Some ports (cris) create libcall regions on their own. We must
3359 avoid any potential nesting of LIBCALLs. */
3360 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3361 remove_note (insn, note);
3362 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3363 remove_note (insn, note);
3365 data.target = target;
3366 data.first = insns;
3367 data.insn = insn;
3368 data.must_stay = 0;
3369 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3370 if (! data.must_stay)
3372 if (PREV_INSN (insn))
3373 NEXT_INSN (PREV_INSN (insn)) = next;
3374 else
3375 insns = next;
3377 if (next)
3378 PREV_INSN (next) = PREV_INSN (insn);
3380 add_insn (insn);
3384 prev = get_last_insn ();
3386 /* Now write the CLOBBER of the output, followed by the setting of each
3387 of the words, followed by the final copy. */
3388 if (target != op0 && target != op1)
3389 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3391 for (insn = insns; insn; insn = next)
3393 next = NEXT_INSN (insn);
3394 add_insn (insn);
3396 if (op1 && REG_P (op1))
3397 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3398 REG_NOTES (insn));
3400 if (op0 && REG_P (op0))
3401 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3402 REG_NOTES (insn));
3405 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3406 != CODE_FOR_nothing)
3408 last = emit_move_insn (target, target);
3409 if (equiv)
3410 set_unique_reg_note (last, REG_EQUAL, equiv);
3412 else
3414 last = get_last_insn ();
3416 /* Remove any existing REG_EQUAL note from "last", or else it will
3417 be mistaken for a note referring to the full contents of the
3418 alleged libcall value when found together with the REG_RETVAL
3419 note added below. An existing note can come from an insn
3420 expansion at "last". */
3421 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3424 if (prev == 0)
3425 first = get_insns ();
3426 else
3427 first = NEXT_INSN (prev);
3429 maybe_encapsulate_block (first, last, equiv);
3431 return last;
3434 /* Emit code to make a call to a constant function or a library call.
3436 INSNS is a list containing all insns emitted in the call.
3437 These insns leave the result in RESULT. Our job is to copy RESULT
3438 to TARGET, which is logically equivalent to EQUIV.
3440 We first emit any insns that set a pseudo on the assumption that these are
3441 loading constants into registers; doing so allows them to be safely cse'ed
3442 between blocks. Then we emit all the other insns in the block, followed by
3443 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3444 note with an operand of EQUIV.
3446 Moving assignments to pseudos outside of the block is done to improve
3447 the generated code, but is not required to generate correct code,
3448 hence being unable to move an assignment is not grounds for not making
3449 a libcall block. There are two reasons why it is safe to leave these
3450 insns inside the block: First, we know that these pseudos cannot be
3451 used in generated RTL outside the block since they are created for
3452 temporary purposes within the block. Second, CSE will not record the
3453 values of anything set inside a libcall block, so we know they must
3454 be dead at the end of the block.
3456 Except for the first group of insns (the ones setting pseudos), the
3457 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3459 void
3460 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3462 rtx final_dest = target;
3463 rtx prev, next, first, last, insn;
3465 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3466 into a MEM later. Protect the libcall block from this change. */
3467 if (! REG_P (target) || REG_USERVAR_P (target))
3468 target = gen_reg_rtx (GET_MODE (target));
3470 /* If we're using non-call exceptions, a libcall corresponding to an
3471 operation that may trap may also trap. */
3472 if (flag_non_call_exceptions && may_trap_p (equiv))
3474 for (insn = insns; insn; insn = NEXT_INSN (insn))
3475 if (CALL_P (insn))
3477 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3479 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3480 remove_note (insn, note);
3483 else
3484 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3485 reg note to indicate that this call cannot throw or execute a nonlocal
3486 goto (unless there is already a REG_EH_REGION note, in which case
3487 we update it). */
3488 for (insn = insns; insn; insn = NEXT_INSN (insn))
3489 if (CALL_P (insn))
3491 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3493 if (note != 0)
3494 XEXP (note, 0) = constm1_rtx;
3495 else
3496 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3497 REG_NOTES (insn));
3500 /* First emit all insns that set pseudos. Remove them from the list as
3501 we go. Avoid insns that set pseudos which were referenced in previous
3502 insns. These can be generated by move_by_pieces, for example,
3503 to update an address. Similarly, avoid insns that reference things
3504 set in previous insns. */
3506 for (insn = insns; insn; insn = next)
3508 rtx set = single_set (insn);
3509 rtx note;
3511 /* Some ports (cris) create libcall regions on their own. We must
3512 avoid any potential nesting of LIBCALLs. */
3513 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3514 remove_note (insn, note);
3515 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3516 remove_note (insn, note);
3518 next = NEXT_INSN (insn);
3520 if (set != 0 && REG_P (SET_DEST (set))
3521 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3523 struct no_conflict_data data;
3525 data.target = const0_rtx;
3526 data.first = insns;
3527 data.insn = insn;
3528 data.must_stay = 0;
3529 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3530 if (! data.must_stay)
3532 if (PREV_INSN (insn))
3533 NEXT_INSN (PREV_INSN (insn)) = next;
3534 else
3535 insns = next;
3537 if (next)
3538 PREV_INSN (next) = PREV_INSN (insn);
3540 add_insn (insn);
3544 /* Some ports use a loop to copy large arguments onto the stack.
3545 Don't move anything outside such a loop. */
3546 if (LABEL_P (insn))
3547 break;
3550 prev = get_last_insn ();
3552 /* Write the remaining insns followed by the final copy. */
3554 for (insn = insns; insn; insn = next)
3556 next = NEXT_INSN (insn);
3558 add_insn (insn);
3561 last = emit_move_insn (target, result);
3562 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3563 != CODE_FOR_nothing)
3564 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3565 else
3567 /* Remove any existing REG_EQUAL note from "last", or else it will
3568 be mistaken for a note referring to the full contents of the
3569 libcall value when found together with the REG_RETVAL note added
3570 below. An existing note can come from an insn expansion at
3571 "last". */
3572 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3575 if (final_dest != target)
3576 emit_move_insn (final_dest, target);
3578 if (prev == 0)
3579 first = get_insns ();
3580 else
3581 first = NEXT_INSN (prev);
3583 maybe_encapsulate_block (first, last, equiv);
3586 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3587 PURPOSE describes how this comparison will be used. CODE is the rtx
3588 comparison code we will be using.
3590 ??? Actually, CODE is slightly weaker than that. A target is still
3591 required to implement all of the normal bcc operations, but not
3592 required to implement all (or any) of the unordered bcc operations. */
3595 can_compare_p (enum rtx_code code, enum machine_mode mode,
3596 enum can_compare_purpose purpose)
3600 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3602 if (purpose == ccp_jump)
3603 return bcc_gen_fctn[(int) code] != NULL;
3604 else if (purpose == ccp_store_flag)
3605 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3606 else
3607 /* There's only one cmov entry point, and it's allowed to fail. */
3608 return 1;
3610 if (purpose == ccp_jump
3611 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3612 return 1;
3613 if (purpose == ccp_cmov
3614 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3615 return 1;
3616 if (purpose == ccp_store_flag
3617 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3618 return 1;
3619 mode = GET_MODE_WIDER_MODE (mode);
3621 while (mode != VOIDmode);
3623 return 0;
3626 /* This function is called when we are going to emit a compare instruction that
3627 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3629 *PMODE is the mode of the inputs (in case they are const_int).
3630 *PUNSIGNEDP nonzero says that the operands are unsigned;
3631 this matters if they need to be widened.
3633 If they have mode BLKmode, then SIZE specifies the size of both operands.
3635 This function performs all the setup necessary so that the caller only has
3636 to emit a single comparison insn. This setup can involve doing a BLKmode
3637 comparison or emitting a library call to perform the comparison if no insn
3638 is available to handle it.
3639 The values which are passed in through pointers can be modified; the caller
3640 should perform the comparison on the modified values. Constant
3641 comparisons must have already been folded. */
3643 static void
3644 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3645 enum machine_mode *pmode, int *punsignedp,
3646 enum can_compare_purpose purpose)
3648 enum machine_mode mode = *pmode;
3649 rtx x = *px, y = *py;
3650 int unsignedp = *punsignedp;
3652 /* If we are inside an appropriately-short loop and we are optimizing,
3653 force expensive constants into a register. */
3654 if (CONSTANT_P (x) && optimize
3655 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3656 x = force_reg (mode, x);
3658 if (CONSTANT_P (y) && optimize
3659 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3660 y = force_reg (mode, y);
3662 #ifdef HAVE_cc0
3663 /* Make sure we have a canonical comparison. The RTL
3664 documentation states that canonical comparisons are required only
3665 for targets which have cc0. */
3666 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3667 #endif
3669 /* Don't let both operands fail to indicate the mode. */
3670 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3671 x = force_reg (mode, x);
3673 /* Handle all BLKmode compares. */
3675 if (mode == BLKmode)
3677 enum machine_mode cmp_mode, result_mode;
3678 enum insn_code cmp_code;
3679 tree length_type;
3680 rtx libfunc;
3681 rtx result;
3682 rtx opalign
3683 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3685 gcc_assert (size);
3687 /* Try to use a memory block compare insn - either cmpstr
3688 or cmpmem will do. */
3689 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3690 cmp_mode != VOIDmode;
3691 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3693 cmp_code = cmpmem_optab[cmp_mode];
3694 if (cmp_code == CODE_FOR_nothing)
3695 cmp_code = cmpstr_optab[cmp_mode];
3696 if (cmp_code == CODE_FOR_nothing)
3697 cmp_code = cmpstrn_optab[cmp_mode];
3698 if (cmp_code == CODE_FOR_nothing)
3699 continue;
3701 /* Must make sure the size fits the insn's mode. */
3702 if ((GET_CODE (size) == CONST_INT
3703 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3704 || (GET_MODE_BITSIZE (GET_MODE (size))
3705 > GET_MODE_BITSIZE (cmp_mode)))
3706 continue;
3708 result_mode = insn_data[cmp_code].operand[0].mode;
3709 result = gen_reg_rtx (result_mode);
3710 size = convert_to_mode (cmp_mode, size, 1);
3711 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3713 *px = result;
3714 *py = const0_rtx;
3715 *pmode = result_mode;
3716 return;
3719 /* Otherwise call a library function, memcmp. */
3720 libfunc = memcmp_libfunc;
3721 length_type = sizetype;
3722 result_mode = TYPE_MODE (integer_type_node);
3723 cmp_mode = TYPE_MODE (length_type);
3724 size = convert_to_mode (TYPE_MODE (length_type), size,
3725 TYPE_UNSIGNED (length_type));
3727 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3728 result_mode, 3,
3729 XEXP (x, 0), Pmode,
3730 XEXP (y, 0), Pmode,
3731 size, cmp_mode);
3732 *px = result;
3733 *py = const0_rtx;
3734 *pmode = result_mode;
3735 return;
3738 /* Don't allow operands to the compare to trap, as that can put the
3739 compare and branch in different basic blocks. */
3740 if (flag_non_call_exceptions)
3742 if (may_trap_p (x))
3743 x = force_reg (mode, x);
3744 if (may_trap_p (y))
3745 y = force_reg (mode, y);
3748 *px = x;
3749 *py = y;
3750 if (can_compare_p (*pcomparison, mode, purpose))
3751 return;
3753 /* Handle a lib call just for the mode we are using. */
3755 if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
3757 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3758 rtx result;
3760 /* If we want unsigned, and this mode has a distinct unsigned
3761 comparison routine, use that. */
3762 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3763 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3765 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3766 word_mode, 2, x, mode, y, mode);
3768 /* There are two kinds of comparison routines. Biased routines
3769 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3770 of gcc expect that the comparison operation is equivalent
3771 to the modified comparison. For signed comparisons compare the
3772 result against 1 in the biased case, and zero in the unbiased
3773 case. For unsigned comparisons always compare against 1 after
3774 biasing the unbiased result by adding 1. This gives us a way to
3775 represent LTU. */
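 /* Illustration (hypothetical values): a biased routine such as libgcc's
 __cmpdi2 returns 0 for "less", 1 for "equal" and 2 for "greater", so a
 signed "a < b" becomes "result < 1". An unbiased routine returns
 -1/0/1, so the same test becomes "result < 0"; for LTU we instead test
 "result + 1 < 1", which is exactly the biasing performed below. */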
3776 *px = result;
3777 *pmode = word_mode;
3778 *py = const1_rtx;
3780 if (!TARGET_LIB_INT_CMP_BIASED)
3782 if (*punsignedp)
3783 *px = plus_constant (result, 1);
3784 else
3785 *py = const0_rtx;
3787 return;
3790 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3791 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3794 /* Before emitting an insn with code ICODE, make sure that X, which is going
3795 to be used for operand OPNUM of the insn, is converted from mode MODE to
3796 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3797 that it is accepted by the operand predicate. Return the new value. */
3799 static rtx
3800 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3801 enum machine_mode wider_mode, int unsignedp)
3803 if (mode != wider_mode)
3804 x = convert_modes (wider_mode, mode, x, unsignedp);
3806 if (!insn_data[icode].operand[opnum].predicate
3807 (x, insn_data[icode].operand[opnum].mode))
3809 if (no_new_pseudos)
3810 return NULL_RTX;
3811 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3814 return x;
3817 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3818 we can do the comparison.
3819 The arguments are the same as for emit_cmp_and_jump_insns, but LABEL may
3820 be NULL_RTX which indicates that only a comparison is to be generated. */
3822 static void
3823 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3824 enum rtx_code comparison, int unsignedp, rtx label)
3826 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3827 enum mode_class class = GET_MODE_CLASS (mode);
3828 enum machine_mode wider_mode = mode;
3830 /* Try combined insns first. */
3833 enum insn_code icode;
3834 PUT_MODE (test, wider_mode);
3836 if (label)
3838 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3840 if (icode != CODE_FOR_nothing
3841 && insn_data[icode].operand[0].predicate (test, wider_mode))
3843 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3844 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3845 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3846 return;
3850 /* Handle some compares against zero. */
3851 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3852 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3854 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3855 emit_insn (GEN_FCN (icode) (x));
3856 if (label)
3857 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3858 return;
3861 /* Handle compares for which there is a directly suitable insn. */
3863 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3864 if (icode != CODE_FOR_nothing)
3866 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3867 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3868 emit_insn (GEN_FCN (icode) (x, y));
3869 if (label)
3870 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3871 return;
3874 if (!CLASS_HAS_WIDER_MODES_P (class))
3875 break;
3877 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3879 while (wider_mode != VOIDmode);
3881 gcc_unreachable ();
3884 /* Generate code to compare X with Y so that the condition codes are
3885 set and to jump to LABEL if the condition is true. If X is a
3886 constant and Y is not a constant, then the comparison is swapped to
3887 ensure that the comparison RTL has the canonical form.
3889 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3890 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3891 the proper branch condition code.
3893 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3895 MODE is the mode of the inputs (in case they are const_int).
3897 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3898 be passed unchanged to emit_cmp_insn, then potentially converted into an
3899 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
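/* Usage sketch (COUNT and DONE_LABEL are hypothetical): to branch to
 DONE_LABEL when the pseudo COUNT is zero, a caller could write

 emit_cmp_and_jump_insns (count, const0_rtx, EQ, NULL_RTX,
 GET_MODE (count), 0, done_label);

 and rely on this function to widen, canonicalize and, if necessary,
 fall back to a libcall-based comparison. */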
3901 void
3902 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3903 enum machine_mode mode, int unsignedp, rtx label)
3905 rtx op0 = x, op1 = y;
3907 /* Swap operands and condition to ensure canonical RTL. */
3908 if (swap_commutative_operands_p (x, y))
3910 /* If we're not emitting a branch, this means some caller
3911 is out of sync. */
3912 gcc_assert (label);
3914 op0 = y, op1 = x;
3915 comparison = swap_condition (comparison);
3918 #ifdef HAVE_cc0
3919 /* If OP0 is still a constant, then both X and Y must be constants.
3920 Force OP0 into a register to create canonical RTL. */
3921 if (CONSTANT_P (op0))
3922 op0 = force_reg (mode, op0);
3923 #endif
3925 if (unsignedp)
3926 comparison = unsigned_condition (comparison);
3928 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3929 ccp_jump);
3930 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3933 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3935 void
3936 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3937 enum machine_mode mode, int unsignedp)
3939 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3942 /* Emit a library call comparison between floating point X and Y.
3943 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3945 static void
3946 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3947 enum machine_mode *pmode, int *punsignedp)
3949 enum rtx_code comparison = *pcomparison;
3950 enum rtx_code swapped = swap_condition (comparison);
3951 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3952 rtx x = *px;
3953 rtx y = *py;
3954 enum machine_mode orig_mode = GET_MODE (x);
3955 enum machine_mode mode;
3956 rtx value, target, insns, equiv;
3957 rtx libfunc = 0;
3958 bool reversed_p = false;
3960 for (mode = orig_mode;
3961 mode != VOIDmode;
3962 mode = GET_MODE_WIDER_MODE (mode))
3964 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3965 break;
3967 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3969 rtx tmp;
3970 tmp = x; x = y; y = tmp;
3971 comparison = swapped;
3972 break;
3975 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3976 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3978 comparison = reversed;
3979 reversed_p = true;
3980 break;
3984 gcc_assert (mode != VOIDmode);
3986 if (mode != orig_mode)
3988 x = convert_to_mode (mode, x, 0);
3989 y = convert_to_mode (mode, y, 0);
3992 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3993 the RTL. This allows the RTL optimizers to delete the libcall if the
3994 condition can be determined at compile-time. */
3995 if (comparison == UNORDERED)
3997 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3998 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3999 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4000 temp, const_true_rtx, equiv);
4002 else
4004 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
4005 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4007 rtx true_rtx, false_rtx;
4009 switch (comparison)
4011 case EQ:
4012 true_rtx = const0_rtx;
4013 false_rtx = const_true_rtx;
4014 break;
4016 case NE:
4017 true_rtx = const_true_rtx;
4018 false_rtx = const0_rtx;
4019 break;
4021 case GT:
4022 true_rtx = const1_rtx;
4023 false_rtx = const0_rtx;
4024 break;
4026 case GE:
4027 true_rtx = const0_rtx;
4028 false_rtx = constm1_rtx;
4029 break;
4031 case LT:
4032 true_rtx = constm1_rtx;
4033 false_rtx = const0_rtx;
4034 break;
4036 case LE:
4037 true_rtx = const0_rtx;
4038 false_rtx = const1_rtx;
4039 break;
4041 default:
4042 gcc_unreachable ();
4044 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
4045 equiv, true_rtx, false_rtx);
4049 start_sequence ();
4050 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4051 word_mode, 2, x, mode, y, mode);
4052 insns = get_insns ();
4053 end_sequence ();
4055 target = gen_reg_rtx (word_mode);
4056 emit_libcall_block (insns, target, value, equiv);
4058 if (comparison == UNORDERED
4059 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4060 comparison = reversed_p ? EQ : NE;
4062 *px = target;
4063 *py = const0_rtx;
4064 *pmode = word_mode;
4065 *pcomparison = comparison;
4066 *punsignedp = 0;
4069 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4071 void
4072 emit_indirect_jump (rtx loc)
4074 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4075 (loc, Pmode))
4076 loc = copy_to_mode_reg (Pmode, loc);
4078 emit_jump_insn (gen_indirect_jump (loc));
4079 emit_barrier ();
4082 #ifdef HAVE_conditional_move
4084 /* Emit a conditional move instruction if the machine supports one for that
4085 condition and machine mode.
4087 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4088 the mode to use should they be constants. If it is VOIDmode, they cannot
4089 both be constants.
4091 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4092 should be stored there. MODE is the mode to use should they be constants.
4093 If it is VOIDmode, they cannot both be constants.
4095 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4096 is not supported. */
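/* Usage sketch (A, B, C, D are hypothetical SImode pseudos): to compute
 "target = (a < b) ? c : d" one could write

 rtx res = emit_conditional_move (target, LT, a, b, SImode,
 c, d, SImode, 0);

 and fall back to an explicit compare-and-branch sequence when RES is
 NULL_RTX. */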
4099 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4100 enum machine_mode cmode, rtx op2, rtx op3,
4101 enum machine_mode mode, int unsignedp)
4103 rtx tem, subtarget, comparison, insn;
4104 enum insn_code icode;
4105 enum rtx_code reversed;
4107 /* If one operand is constant, make it the second one. Only do this
4108 if the other operand is not constant as well. */
4110 if (swap_commutative_operands_p (op0, op1))
4112 tem = op0;
4113 op0 = op1;
4114 op1 = tem;
4115 code = swap_condition (code);
4118 /* get_condition will prefer to generate LT and GT even if the old
4119 comparison was against zero, so undo that canonicalization here since
4120 comparisons against zero are cheaper. */
4121 if (code == LT && op1 == const1_rtx)
4122 code = LE, op1 = const0_rtx;
4123 else if (code == GT && op1 == constm1_rtx)
4124 code = GE, op1 = const0_rtx;
4126 if (cmode == VOIDmode)
4127 cmode = GET_MODE (op0);
4129 if (swap_commutative_operands_p (op2, op3)
4130 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4131 != UNKNOWN))
4133 tem = op2;
4134 op2 = op3;
4135 op3 = tem;
4136 code = reversed;
4139 if (mode == VOIDmode)
4140 mode = GET_MODE (op2);
4142 icode = movcc_gen_code[mode];
4144 if (icode == CODE_FOR_nothing)
4145 return 0;
4147 if (!target)
4148 target = gen_reg_rtx (mode);
4150 subtarget = target;
4152 /* If the insn doesn't accept these operands, put them in pseudos. */
4154 if (!insn_data[icode].operand[0].predicate
4155 (subtarget, insn_data[icode].operand[0].mode))
4156 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4158 if (!insn_data[icode].operand[2].predicate
4159 (op2, insn_data[icode].operand[2].mode))
4160 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4162 if (!insn_data[icode].operand[3].predicate
4163 (op3, insn_data[icode].operand[3].mode))
4164 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4166 /* Everything should now be in suitable form, so emit the compare insn
4167 and then the conditional move. */
4169 comparison
4170 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4172 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4173 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4174 return NULL and let the caller figure out how best to deal with this
4175 situation. */
4176 if (GET_CODE (comparison) != code)
4177 return NULL_RTX;
4179 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4181 /* If that failed, then give up. */
4182 if (insn == 0)
4183 return 0;
4185 emit_insn (insn);
4187 if (subtarget != target)
4188 convert_move (target, subtarget, 0);
4190 return target;
4193 /* Return nonzero if a conditional move of mode MODE is supported.
4195 This function is for combine so it can tell whether an insn that looks
4196 like a conditional move is actually supported by the hardware. If we
4197 guess wrong we lose a bit on optimization, but that's it. */
4198 /* ??? sparc64 supports conditionally moving integer values based on fp
4199 comparisons, and vice versa. How do we handle them? */
4202 can_conditionally_move_p (enum machine_mode mode)
4204 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4205 return 1;
4207 return 0;
4210 #endif /* HAVE_conditional_move */
4212 /* Emit a conditional addition instruction if the machine supports one for that
4213 condition and machine mode.
4215 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4216 the mode to use should they be constants. If it is VOIDmode, they cannot
4217 both be constants.
4219 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4220 should be stored there. MODE is the mode to use should they be constants.
4221 If it is VOIDmode, they cannot both be constants.
4223 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4224 is not supported. */
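/* Usage sketch (X, Y, A, B are hypothetical SImode pseudos): the expression
 "target = (x <= y) ? a : a + b" maps onto

 rtx res = emit_conditional_add (target, LE, x, y, SImode,
 a, b, SImode, 0);

 with a NULL_RTX result again meaning the target has no suitable insn. */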
4227 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4228 enum machine_mode cmode, rtx op2, rtx op3,
4229 enum machine_mode mode, int unsignedp)
4231 rtx tem, subtarget, comparison, insn;
4232 enum insn_code icode;
4233 enum rtx_code reversed;
4235 /* If one operand is constant, make it the second one. Only do this
4236 if the other operand is not constant as well. */
4238 if (swap_commutative_operands_p (op0, op1))
4240 tem = op0;
4241 op0 = op1;
4242 op1 = tem;
4243 code = swap_condition (code);
4246 /* get_condition will prefer to generate LT and GT even if the old
4247 comparison was against zero, so undo that canonicalization here since
4248 comparisons against zero are cheaper. */
4249 if (code == LT && op1 == const1_rtx)
4250 code = LE, op1 = const0_rtx;
4251 else if (code == GT && op1 == constm1_rtx)
4252 code = GE, op1 = const0_rtx;
4254 if (cmode == VOIDmode)
4255 cmode = GET_MODE (op0);
4257 if (swap_commutative_operands_p (op2, op3)
4258 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4259 != UNKNOWN))
4261 tem = op2;
4262 op2 = op3;
4263 op3 = tem;
4264 code = reversed;
4267 if (mode == VOIDmode)
4268 mode = GET_MODE (op2);
4270 icode = addcc_optab->handlers[(int) mode].insn_code;
4272 if (icode == CODE_FOR_nothing)
4273 return 0;
4275 if (!target)
4276 target = gen_reg_rtx (mode);
4278 /* If the insn doesn't accept these operands, put them in pseudos. */
4280 if (!insn_data[icode].operand[0].predicate
4281 (target, insn_data[icode].operand[0].mode))
4282 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4283 else
4284 subtarget = target;
4286 if (!insn_data[icode].operand[2].predicate
4287 (op2, insn_data[icode].operand[2].mode))
4288 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4290 if (!insn_data[icode].operand[3].predicate
4291 (op3, insn_data[icode].operand[3].mode))
4292 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4294 /* Everything should now be in suitable form, so emit the compare insn
4295 and then the conditional add. */
4297 comparison
4298 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4300 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4301 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4302 return NULL and let the caller figure out how best to deal with this
4303 situation. */
4304 if (GET_CODE (comparison) != code)
4305 return NULL_RTX;
4307 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4309 /* If that failed, then give up. */
4310 if (insn == 0)
4311 return 0;
4313 emit_insn (insn);
4315 if (subtarget != target)
4316 convert_move (target, subtarget, 0);
4318 return target;
4321 /* These functions attempt to generate an insn body, rather than
4322 emitting the insn, but if the gen function already emits them, we
4323 make no attempt to turn them back into naked patterns. */
4325 /* Generate and return an insn body to add Y to X. */
4328 gen_add2_insn (rtx x, rtx y)
4330 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4332 gcc_assert (insn_data[icode].operand[0].predicate
4333 (x, insn_data[icode].operand[0].mode));
4334 gcc_assert (insn_data[icode].operand[1].predicate
4335 (x, insn_data[icode].operand[1].mode));
4336 gcc_assert (insn_data[icode].operand[2].predicate
4337 (y, insn_data[icode].operand[2].mode));
4339 return GEN_FCN (icode) (x, x, y);
4342 /* Generate and return an insn body to add r1 and c,
4343 storing the result in r0. */
4345 gen_add3_insn (rtx r0, rtx r1, rtx c)
4347 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4349 if (icode == CODE_FOR_nothing
4350 || !(insn_data[icode].operand[0].predicate
4351 (r0, insn_data[icode].operand[0].mode))
4352 || !(insn_data[icode].operand[1].predicate
4353 (r1, insn_data[icode].operand[1].mode))
4354 || !(insn_data[icode].operand[2].predicate
4355 (c, insn_data[icode].operand[2].mode)))
4356 return NULL_RTX;
4358 return GEN_FCN (icode) (r0, r1, c);
4362 have_add2_insn (rtx x, rtx y)
4364 int icode;
4366 gcc_assert (GET_MODE (x) != VOIDmode);
4368 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4370 if (icode == CODE_FOR_nothing)
4371 return 0;
4373 if (!(insn_data[icode].operand[0].predicate
4374 (x, insn_data[icode].operand[0].mode))
4375 || !(insn_data[icode].operand[1].predicate
4376 (x, insn_data[icode].operand[1].mode))
4377 || !(insn_data[icode].operand[2].predicate
4378 (y, insn_data[icode].operand[2].mode)))
4379 return 0;
4381 return 1;
4384 /* Generate and return an insn body to subtract Y from X. */
4387 gen_sub2_insn (rtx x, rtx y)
4389 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4391 gcc_assert (insn_data[icode].operand[0].predicate
4392 (x, insn_data[icode].operand[0].mode));
4393 gcc_assert (insn_data[icode].operand[1].predicate
4394 (x, insn_data[icode].operand[1].mode));
4395 gcc_assert (insn_data[icode].operand[2].predicate
4396 (y, insn_data[icode].operand[2].mode));
4398 return GEN_FCN (icode) (x, x, y);
4401 /* Generate and return an insn body to subtract c from r1,
4402 storing the result in r0. */
4404 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4406 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4408 if (icode == CODE_FOR_nothing
4409 || !(insn_data[icode].operand[0].predicate
4410 (r0, insn_data[icode].operand[0].mode))
4411 || !(insn_data[icode].operand[1].predicate
4412 (r1, insn_data[icode].operand[1].mode))
4413 || !(insn_data[icode].operand[2].predicate
4414 (c, insn_data[icode].operand[2].mode)))
4415 return NULL_RTX;
4417 return GEN_FCN (icode) (r0, r1, c);
4421 have_sub2_insn (rtx x, rtx y)
4423 int icode;
4425 gcc_assert (GET_MODE (x) != VOIDmode);
4427 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4429 if (icode == CODE_FOR_nothing)
4430 return 0;
4432 if (!(insn_data[icode].operand[0].predicate
4433 (x, insn_data[icode].operand[0].mode))
4434 || !(insn_data[icode].operand[1].predicate
4435 (x, insn_data[icode].operand[1].mode))
4436 || !(insn_data[icode].operand[2].predicate
4437 (y, insn_data[icode].operand[2].mode)))
4438 return 0;
4440 return 1;
4443 /* Generate the body of an instruction to copy Y into X.
4444 It may be a list of insns, if one insn isn't enough. */
4447 gen_move_insn (rtx x, rtx y)
4449 rtx seq;
4451 start_sequence ();
4452 emit_move_insn_1 (x, y);
4453 seq = get_insns ();
4454 end_sequence ();
4455 return seq;
4458 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4459 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4460 no such operation exists, CODE_FOR_nothing will be returned. */
4462 enum insn_code
4463 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4464 int unsignedp)
4466 convert_optab tab;
4467 #ifdef HAVE_ptr_extend
4468 if (unsignedp < 0)
4469 return CODE_FOR_ptr_extend;
4470 #endif
4472 tab = unsignedp ? zext_optab : sext_optab;
4473 return tab->handlers[to_mode][from_mode].insn_code;
4476 /* Generate the body of an insn to extend Y (with mode MFROM)
4477 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4480 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4481 enum machine_mode mfrom, int unsignedp)
4483 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4484 return GEN_FCN (icode) (x, y);
4487 /* can_fix_p and can_float_p say whether the target machine
4488 can directly convert a given fixed point type to
4489 a given floating point type, or vice versa.
4490 The returned value is the CODE_FOR_... value to use,
4491 or CODE_FOR_nothing if these modes cannot be directly converted.
4493 *TRUNCP_PTR is set to 1 if it is necessary to output
4494 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4496 static enum insn_code
4497 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4498 int unsignedp, int *truncp_ptr)
4500 convert_optab tab;
4501 enum insn_code icode;
4503 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4504 icode = tab->handlers[fixmode][fltmode].insn_code;
4505 if (icode != CODE_FOR_nothing)
4507 *truncp_ptr = 0;
4508 return icode;
4511 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4512 for this to work. We need to rework the fix* and ftrunc* patterns
4513 and documentation. */
4514 tab = unsignedp ? ufix_optab : sfix_optab;
4515 icode = tab->handlers[fixmode][fltmode].insn_code;
4516 if (icode != CODE_FOR_nothing
4517 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4519 *truncp_ptr = 1;
4520 return icode;
4523 *truncp_ptr = 0;
4524 return CODE_FOR_nothing;
4527 static enum insn_code
4528 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4529 int unsignedp)
4531 convert_optab tab;
4533 tab = unsignedp ? ufloat_optab : sfloat_optab;
4534 return tab->handlers[fltmode][fixmode].insn_code;
4537 /* Generate code to convert FROM to floating point
4538 and store in TO. FROM must be fixed point and not VOIDmode.
4539 UNSIGNEDP nonzero means regard FROM as unsigned.
4540 Normally this is done by correcting the final value
4541 if it is negative. */
4543 void
4544 expand_float (rtx to, rtx from, int unsignedp)
4546 enum insn_code icode;
4547 rtx target = to;
4548 enum machine_mode fmode, imode;
4549 bool can_do_signed = false;
4551 /* Crash now, because we won't be able to decide which mode to use. */
4552 gcc_assert (GET_MODE (from) != VOIDmode);
4554 /* Look for an insn to do the conversion. Do it in the specified
4555 modes if possible; otherwise convert either input, output or both to
4556 wider mode. If the integer mode is wider than the mode of FROM,
4557 we can do the conversion signed even if the input is unsigned. */
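 /* For instance, an unsigned SImode source can be zero-extended to DImode
 and then converted with a signed DImode->DFmode insn, because the
 widened value is never negative (illustrative example). */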
4559 for (fmode = GET_MODE (to); fmode != VOIDmode;
4560 fmode = GET_MODE_WIDER_MODE (fmode))
4561 for (imode = GET_MODE (from); imode != VOIDmode;
4562 imode = GET_MODE_WIDER_MODE (imode))
4564 int doing_unsigned = unsignedp;
4566 if (fmode != GET_MODE (to)
4567 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4568 continue;
4570 icode = can_float_p (fmode, imode, unsignedp);
4571 if (icode == CODE_FOR_nothing && unsignedp)
4573 enum insn_code scode = can_float_p (fmode, imode, 0);
4574 if (scode != CODE_FOR_nothing)
4575 can_do_signed = true;
4576 if (imode != GET_MODE (from))
4577 icode = scode, doing_unsigned = 0;
4580 if (icode != CODE_FOR_nothing)
4582 if (imode != GET_MODE (from))
4583 from = convert_to_mode (imode, from, unsignedp);
4585 if (fmode != GET_MODE (to))
4586 target = gen_reg_rtx (fmode);
4588 emit_unop_insn (icode, target, from,
4589 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4591 if (target != to)
4592 convert_move (to, target, 0);
4593 return;
4597 /* Unsigned integer, and no way to convert directly. For binary
4598 floating point modes, convert as signed, then conditionally adjust
4599 the result. */
4600 if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
4602 rtx label = gen_label_rtx ();
4603 rtx temp;
4604 REAL_VALUE_TYPE offset;
4606 /* Look for a usable floating mode FMODE wider than the source and at
4607 least as wide as the target. Using FMODE will avoid rounding woes
4608 with unsigned values greater than the signed maximum value. */
4610 for (fmode = GET_MODE (to); fmode != VOIDmode;
4611 fmode = GET_MODE_WIDER_MODE (fmode))
4612 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4613 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4614 break;
4616 if (fmode == VOIDmode)
4618 /* There is no such mode. Pretend the target is wide enough. */
4619 fmode = GET_MODE (to);
4621 /* Avoid double-rounding when TO is narrower than FROM. */
4622 if ((significand_size (fmode) + 1)
4623 < GET_MODE_BITSIZE (GET_MODE (from)))
4625 rtx temp1;
4626 rtx neglabel = gen_label_rtx ();
4628 /* Don't use TARGET if it isn't a register, is a hard register,
4629 or is the wrong mode. */
4630 if (!REG_P (target)
4631 || REGNO (target) < FIRST_PSEUDO_REGISTER
4632 || GET_MODE (target) != fmode)
4633 target = gen_reg_rtx (fmode);
4635 imode = GET_MODE (from);
4636 do_pending_stack_adjust ();
4638 /* Test whether the sign bit is set. */
4639 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4640 0, neglabel);
4642 /* The sign bit is not set. Convert as signed. */
4643 expand_float (target, from, 0);
4644 emit_jump_insn (gen_jump (label));
4645 emit_barrier ();
4647 /* The sign bit is set.
4648 Convert to a usable (positive signed) value by shifting right
4649 one bit, while remembering if a nonzero bit was shifted
4650 out; i.e., compute (from & 1) | (from >> 1). */
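 /* Informally: the OR-ed-in low bit acts as a sticky bit, so converting
 the halved value and doubling it afterwards rounds the same way a
 direct conversion of FROM would (illustrative note, assuming
 round-to-nearest). */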
4652 emit_label (neglabel);
4653 temp = expand_binop (imode, and_optab, from, const1_rtx,
4654 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4655 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4656 NULL_RTX, 1);
4657 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4658 OPTAB_LIB_WIDEN);
4659 expand_float (target, temp, 0);
4661 /* Multiply by 2 to undo the shift above. */
4662 temp = expand_binop (fmode, add_optab, target, target,
4663 target, 0, OPTAB_LIB_WIDEN);
4664 if (temp != target)
4665 emit_move_insn (target, temp);
4667 do_pending_stack_adjust ();
4668 emit_label (label);
4669 goto done;
4673 /* If we are about to do some arithmetic to correct for an
4674 unsigned operand, do it in a pseudo-register. */
4676 if (GET_MODE (to) != fmode
4677 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4678 target = gen_reg_rtx (fmode);
4680 /* Convert as signed integer to floating. */
4681 expand_float (target, from, 0);
4683 /* If FROM is negative (and therefore TO is negative),
4684 correct its value by 2**bitwidth. */
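 /* Example (assuming a 32-bit FROM): the unsigned value 0xFFFFFFFE is
 first converted as the signed value -2, yielding -2.0; adding
 2**32 = 4294967296.0 then produces the intended 4294967294.0. */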
4686 do_pending_stack_adjust ();
4687 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4688 0, label);
4691 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4692 temp = expand_binop (fmode, add_optab, target,
4693 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4694 target, 0, OPTAB_LIB_WIDEN);
4695 if (temp != target)
4696 emit_move_insn (target, temp);
4698 do_pending_stack_adjust ();
4699 emit_label (label);
4700 goto done;
4703 /* No hardware instruction available; call a library routine. */
4705 rtx libfunc;
4706 rtx insns;
4707 rtx value;
4708 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4710 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4711 from = convert_to_mode (SImode, from, unsignedp);
4713 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4714 gcc_assert (libfunc);
4716 start_sequence ();
4718 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4719 GET_MODE (to), 1, from,
4720 GET_MODE (from));
4721 insns = get_insns ();
4722 end_sequence ();
4724 emit_libcall_block (insns, target, value,
4725 gen_rtx_FLOAT (GET_MODE (to), from));
4728 done:
4730 /* Copy result to requested destination
4731 if we have been computing in a temp location. */
4733 if (target != to)
4735 if (GET_MODE (target) == GET_MODE (to))
4736 emit_move_insn (to, target);
4737 else
4738 convert_move (to, target, 0);
4742 /* Generate code to convert FROM to fixed point and store in TO. FROM
4743 must be floating point. */
4745 void
4746 expand_fix (rtx to, rtx from, int unsignedp)
4748 enum insn_code icode;
4749 rtx target = to;
4750 enum machine_mode fmode, imode;
4751 int must_trunc = 0;
4753 /* We first try to find a pair of modes, one real and one integer, at
4754 least as wide as FROM and TO, respectively, in which we can open-code
4755 this conversion. If the integer mode is wider than the mode of TO,
4756 we can do the conversion either signed or unsigned. */
4758 for (fmode = GET_MODE (from); fmode != VOIDmode;
4759 fmode = GET_MODE_WIDER_MODE (fmode))
4760 for (imode = GET_MODE (to); imode != VOIDmode;
4761 imode = GET_MODE_WIDER_MODE (imode))
4763 int doing_unsigned = unsignedp;
4765 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4766 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4767 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4769 if (icode != CODE_FOR_nothing)
4771 if (fmode != GET_MODE (from))
4772 from = convert_to_mode (fmode, from, 0);
4774 if (must_trunc)
4776 rtx temp = gen_reg_rtx (GET_MODE (from));
4777 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4778 temp, 0);
4781 if (imode != GET_MODE (to))
4782 target = gen_reg_rtx (imode);
4784 emit_unop_insn (icode, target, from,
4785 doing_unsigned ? UNSIGNED_FIX : FIX);
4786 if (target != to)
4787 convert_move (to, target, unsignedp);
4788 return;
4792 /* For an unsigned conversion, there is one more way to do it.
4793 If we have a signed conversion, we generate code that compares
4794 the real value to the largest representable positive number. If it
4795 is smaller, the conversion is done normally. Otherwise, subtract
4796 one plus the highest signed number, convert, and add it back.
4798 We only need to check all real modes, since we know we didn't find
4799 anything with a wider integer mode.
4801 This code used to extend the FP value into a mode wider than the destination.
4802 This is not needed. Consider, for instance, conversion from SFmode
4803 into DImode.
4805 The hot path through the code is dealing with inputs smaller than 2^63
4806 and doing just the conversion, so there are no bits to lose.
4808 In the other path we know the value is positive in the range 2^63..2^64-1
4809 inclusive (for any other input, overflow happens and the result is undefined).
4810 So we know that the most significant bit set in the mantissa corresponds to
4811 2^63. The subtraction of 2^63 should not generate any rounding as it
4812 simply clears out that bit. The rest is trivial. */
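 /* Worked example (assuming DFmode -> 64-bit unsigned): for an input of
 2^63 + 2048, which is exactly representable in DFmode, the value
 compares >= 2^63, so we compute (2^63 + 2048) - 2^63 = 2048.0 with no
 rounding, fix that to the integer 2048, and XOR in 1 << 63 to recover
 0x8000000000000800. */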
4814 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4815 for (fmode = GET_MODE (from); fmode != VOIDmode;
4816 fmode = GET_MODE_WIDER_MODE (fmode))
4817 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4818 &must_trunc))
4820 int bitsize;
4821 REAL_VALUE_TYPE offset;
4822 rtx limit, lab1, lab2, insn;
4824 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4825 real_2expN (&offset, bitsize - 1);
4826 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4827 lab1 = gen_label_rtx ();
4828 lab2 = gen_label_rtx ();
4830 if (fmode != GET_MODE (from))
4831 from = convert_to_mode (fmode, from, 0);
4833 /* See if we need to do the subtraction. */
4834 do_pending_stack_adjust ();
4835 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4836 0, lab1);
4838 /* If not, do the signed "fix" and branch around fixup code. */
4839 expand_fix (to, from, 0);
4840 emit_jump_insn (gen_jump (lab2));
4841 emit_barrier ();
4843 /* Otherwise, subtract 2**(N-1), convert to signed number,
4844 then add 2**(N-1). Do the addition using XOR since this
4845 will often generate better code. */
4846 emit_label (lab1);
4847 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4848 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4849 expand_fix (to, target, 0);
4850 target = expand_binop (GET_MODE (to), xor_optab, to,
4851 gen_int_mode
4852 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4853 GET_MODE (to)),
4854 to, 1, OPTAB_LIB_WIDEN);
4856 if (target != to)
4857 emit_move_insn (to, target);
4859 emit_label (lab2);
4861 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4862 != CODE_FOR_nothing)
4864 /* Make a place for a REG_NOTE and add it. */
4865 insn = emit_move_insn (to, to);
4866 set_unique_reg_note (insn,
4867 REG_EQUAL,
4868 gen_rtx_fmt_e (UNSIGNED_FIX,
4869 GET_MODE (to),
4870 copy_rtx (from)));
4873 return;
4876 /* We can't do it with an insn, so use a library call. But first ensure
4877 that the mode of TO is at least as wide as SImode, since those are the
4878 only library calls we know about. */
4880 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4882 target = gen_reg_rtx (SImode);
4884 expand_fix (target, from, unsignedp);
4886 else
4888 rtx insns;
4889 rtx value;
4890 rtx libfunc;
4892 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4893 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4894 gcc_assert (libfunc);
4896 start_sequence ();
4898 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4899 GET_MODE (to), 1, from,
4900 GET_MODE (from));
4901 insns = get_insns ();
4902 end_sequence ();
4904 emit_libcall_block (insns, target, value,
4905 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4906 GET_MODE (to), from));
4909 if (target != to)
4911 if (GET_MODE (to) == GET_MODE (target))
4912 emit_move_insn (to, target);
4913 else
4914 convert_move (to, target, 0);
4918 /* Generate code to convert FROM to fixed point and store in TO. FROM
4919 must be floating point, TO must be signed. Use the conversion optab
4920 TAB to do the conversion. */
4922 bool
4923 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
4925 enum insn_code icode;
4926 rtx target = to;
4927 enum machine_mode fmode, imode;
4929 /* We first try to find a pair of modes, one real and one integer, at
4930 least as wide as FROM and TO, respectively, in which we can open-code
4931 this conversion. If the integer mode is wider than the mode of TO,
4932 we can do the conversion either signed or unsigned. */
4934 for (fmode = GET_MODE (from); fmode != VOIDmode;
4935 fmode = GET_MODE_WIDER_MODE (fmode))
4936 for (imode = GET_MODE (to); imode != VOIDmode;
4937 imode = GET_MODE_WIDER_MODE (imode))
4939 icode = tab->handlers[imode][fmode].insn_code;
4940 if (icode != CODE_FOR_nothing)
4942 if (fmode != GET_MODE (from))
4943 from = convert_to_mode (fmode, from, 0);
4945 if (imode != GET_MODE (to))
4946 target = gen_reg_rtx (imode);
4948 emit_unop_insn (icode, target, from, UNKNOWN);
4949 if (target != to)
4950 convert_move (to, target, 0);
4951 return true;
4955 return false;
4958 /* Report whether we have an instruction to perform the operation
4959 specified by CODE on operands of mode MODE. */
4961 have_insn_for (enum rtx_code code, enum machine_mode mode)
4963 return (code_to_optab[(int) code] != 0
4964 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4965 != CODE_FOR_nothing));
4968 /* Create a blank optab. */
4969 static optab
4970 new_optab (void)
4972 int i;
4973 optab op = ggc_alloc (sizeof (struct optab));
4974 for (i = 0; i < NUM_MACHINE_MODES; i++)
4976 op->handlers[i].insn_code = CODE_FOR_nothing;
4977 op->handlers[i].libfunc = 0;
4980 return op;
4983 static convert_optab
4984 new_convert_optab (void)
4986 int i, j;
4987 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4988 for (i = 0; i < NUM_MACHINE_MODES; i++)
4989 for (j = 0; j < NUM_MACHINE_MODES; j++)
4991 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4992 op->handlers[i][j].libfunc = 0;
4994 return op;
4997 /* Same, but fill in its code as CODE, and write it into the
4998 code_to_optab table. */
4999 static inline optab
5000 init_optab (enum rtx_code code)
5002 optab op = new_optab ();
5003 op->code = code;
5004 code_to_optab[(int) code] = op;
5005 return op;
5008 /* Same, but fill in its code as CODE, and do _not_ write it into
5009 the code_to_optab table. */
5010 static inline optab
5011 init_optabv (enum rtx_code code)
5013 optab op = new_optab ();
5014 op->code = code;
5015 return op;
5018 /* Conversion optabs never go in the code_to_optab table. */
5019 static inline convert_optab
5020 init_convert_optab (enum rtx_code code)
5022 convert_optab op = new_convert_optab ();
5023 op->code = code;
5024 return op;
5027 /* Initialize the libfunc fields of an entire group of entries in some
5028 optab. Each entry is set equal to a string consisting of a leading
5029 pair of underscores followed by a generic operation name followed by
5030 a mode name (downshifted to lowercase) followed by a single character
5031 representing the number of operands for the given operation (which is
5032 usually one of the characters '2', '3', or '4').
5034 OPTABLE is the table in which libfunc fields are to be initialized.
5035 FIRST_MODE is the first machine mode index in the given optab to
5036 initialize.
5037 LAST_MODE is the last machine mode index in the given optab to
5038 initialize.
5039 OPNAME is the generic (string) name of the operation.
5040 SUFFIX is the character which specifies the number of operands for
5041 the given generic operation.
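/* For example, with OPNAME "add" and SUFFIX '3' the SImode entry of
 add_optab receives the libfunc name "__addsi3" and the DImode entry
 "__adddi3" (the exact set of modes covered depends on the target). */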
5044 static void
5045 init_libfuncs (optab optable, int first_mode, int last_mode,
5046 const char *opname, int suffix)
5048 int mode;
5049 unsigned opname_len = strlen (opname);
5051 for (mode = first_mode; (int) mode <= (int) last_mode;
5052 mode = (enum machine_mode) ((int) mode + 1))
5054 const char *mname = GET_MODE_NAME (mode);
5055 unsigned mname_len = strlen (mname);
5056 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
5057 char *p;
5058 const char *q;
5060 p = libfunc_name;
5061 *p++ = '_';
5062 *p++ = '_';
5063 for (q = opname; *q; )
5064 *p++ = *q++;
5065 for (q = mname; *q; q++)
5066 *p++ = TOLOWER (*q);
5067 *p++ = suffix;
5068 *p = '\0';
5070 optable->handlers[(int) mode].libfunc
5071 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
5075 /* Initialize the libfunc fields of an entire group of entries in some
5076 optab which correspond to all integer mode operations. The parameters
5077 have the same meaning as similarly named ones for the `init_libfuncs'
5078 routine. (See above). */
5080 static void
5081 init_integral_libfuncs (optab optable, const char *opname, int suffix)
5083 int maxsize = 2 * BITS_PER_WORD;
5084 if (maxsize < LONG_LONG_TYPE_SIZE)
5085 maxsize = LONG_LONG_TYPE_SIZE;
5086 init_libfuncs (optable, word_mode,
5087 mode_for_size (maxsize, MODE_INT, 0),
5088 opname, suffix);
5091 /* Initialize the libfunc fields of an entire group of entries in some
5092 optab which correspond to all real mode operations. The parameters
5093 have the same meaning as similarly named ones for the `init_libfuncs'
5094 routine. (See above). */
5096 static void
5097 init_floating_libfuncs (optab optable, const char *opname, int suffix)
5099 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
5100 init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
5101 opname, suffix);
5104 /* Initialize the libfunc fields of an entire group of entries of an
5105 inter-mode-class conversion optab. The string formation rules are
5106 similar to the ones for init_libfuncs, above, but instead of having
5107 a mode name and an operand count these functions have two mode names
5108 and no operand count. */
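/* For example, for sfloat_optab with OPNAME "float" the SImode-to-DFmode
 entry is named "__floatsidf". */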
5109 static void
5110 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
5111 enum mode_class from_class,
5112 enum mode_class to_class)
5114 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
5115 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
5116 size_t opname_len = strlen (opname);
5117 size_t max_mname_len = 0;
5119 enum machine_mode fmode, tmode;
5120 const char *fname, *tname;
5121 const char *q;
5122 char *libfunc_name, *suffix;
5123 char *p;
5125 for (fmode = first_from_mode;
5126 fmode != VOIDmode;
5127 fmode = GET_MODE_WIDER_MODE (fmode))
5128 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
5130 for (tmode = first_to_mode;
5131 tmode != VOIDmode;
5132 tmode = GET_MODE_WIDER_MODE (tmode))
5133 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
5135 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5136 libfunc_name[0] = '_';
5137 libfunc_name[1] = '_';
5138 memcpy (&libfunc_name[2], opname, opname_len);
5139 suffix = libfunc_name + opname_len + 2;
5141 for (fmode = first_from_mode; fmode != VOIDmode;
5142 fmode = GET_MODE_WIDER_MODE (fmode))
5143 for (tmode = first_to_mode; tmode != VOIDmode;
5144 tmode = GET_MODE_WIDER_MODE (tmode))
5146 fname = GET_MODE_NAME (fmode);
5147 tname = GET_MODE_NAME (tmode);
5149 p = suffix;
5150 for (q = fname; *q; p++, q++)
5151 *p = TOLOWER (*q);
5152 for (q = tname; *q; p++, q++)
5153 *p = TOLOWER (*q);
5155 *p = '\0';
5157 tab->handlers[tmode][fmode].libfunc
5158 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5159 p - libfunc_name));
5163 /* Initialize the libfunc fields of an entire group of entries of an
5164 intra-mode-class conversion optab. The string formation rules are
5165 similar to the ones for init_libfuncs, above. WIDENING says whether
5166 the optab goes from narrow to wide modes or vice versa. These functions
5167 have two mode names _and_ an operand count. */
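/* For example, sext_optab with OPNAME "extend" and WIDENING true gets
 "__extendsfdf2" for the SFmode-to-DFmode entry, while trunc_optab with
 OPNAME "trunc" and WIDENING false gets "__truncdfsf2" for the
 DFmode-to-SFmode entry. */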
5168 static void
5169 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
5170 enum mode_class class, bool widening)
5172 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
5173 size_t opname_len = strlen (opname);
5174 size_t max_mname_len = 0;
5176 enum machine_mode nmode, wmode;
5177 const char *nname, *wname;
5178 const char *q;
5179 char *libfunc_name, *suffix;
5180 char *p;
5182 for (nmode = first_mode; nmode != VOIDmode;
5183 nmode = GET_MODE_WIDER_MODE (nmode))
5184 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
5186 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5187 libfunc_name[0] = '_';
5188 libfunc_name[1] = '_';
5189 memcpy (&libfunc_name[2], opname, opname_len);
5190 suffix = libfunc_name + opname_len + 2;
5192 for (nmode = first_mode; nmode != VOIDmode;
5193 nmode = GET_MODE_WIDER_MODE (nmode))
5194 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
5195 wmode = GET_MODE_WIDER_MODE (wmode))
5197 nname = GET_MODE_NAME (nmode);
5198 wname = GET_MODE_NAME (wmode);
5200 p = suffix;
5201 for (q = widening ? nname : wname; *q; p++, q++)
5202 *p = TOLOWER (*q);
5203 for (q = widening ? wname : nname; *q; p++, q++)
5204 *p = TOLOWER (*q);
5206 *p++ = '2';
5207 *p = '\0';
5209 tab->handlers[widening ? wmode : nmode]
5210 [widening ? nmode : wmode].libfunc
5211 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5212 p - libfunc_name));
5218 init_one_libfunc (const char *name)
5220 rtx symbol;
5222 /* Create a FUNCTION_DECL that can be passed to
5223 targetm.encode_section_info. */
5224 /* ??? We don't have any type information except that this is
5225 a function. Pretend this is "int foo()". */
5226 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5227 build_function_type (integer_type_node, NULL_TREE));
5228 DECL_ARTIFICIAL (decl) = 1;
5229 DECL_EXTERNAL (decl) = 1;
5230 TREE_PUBLIC (decl) = 1;
5232 symbol = XEXP (DECL_RTL (decl), 0);
5234 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5235 are the flags assigned by targetm.encode_section_info. */
5236 SET_SYMBOL_REF_DECL (symbol, 0);
5238 return symbol;
5241 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5242 MODE to NAME, which should be either 0 or a string constant. */
5243 void
5244 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5246 if (name)
5247 optable->handlers[mode].libfunc = init_one_libfunc (name);
5248 else
5249 optable->handlers[mode].libfunc = 0;
5252 /* Call this to reset the function entry for one conversion optab
5253 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5254 either 0 or a string constant. */
5255 void
5256 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5257 enum machine_mode fmode, const char *name)
5259 if (name)
5260 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5261 else
5262 optable->handlers[tmode][fmode].libfunc = 0;
5265 /* Call this once to initialize the contents of the optabs
5266 appropriately for the current target machine. */
5268 void
5269 init_optabs (void)
5271 unsigned int i;
5273 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5275 for (i = 0; i < NUM_RTX_CODE; i++)
5276 setcc_gen_code[i] = CODE_FOR_nothing;
5278 #ifdef HAVE_conditional_move
5279 for (i = 0; i < NUM_MACHINE_MODES; i++)
5280 movcc_gen_code[i] = CODE_FOR_nothing;
5281 #endif
5283 for (i = 0; i < NUM_MACHINE_MODES; i++)
5285 vcond_gen_code[i] = CODE_FOR_nothing;
5286 vcondu_gen_code[i] = CODE_FOR_nothing;
5289 add_optab = init_optab (PLUS);
5290 addv_optab = init_optabv (PLUS);
5291 sub_optab = init_optab (MINUS);
5292 subv_optab = init_optabv (MINUS);
5293 smul_optab = init_optab (MULT);
5294 smulv_optab = init_optabv (MULT);
5295 smul_highpart_optab = init_optab (UNKNOWN);
5296 umul_highpart_optab = init_optab (UNKNOWN);
5297 smul_widen_optab = init_optab (UNKNOWN);
5298 umul_widen_optab = init_optab (UNKNOWN);
5299 usmul_widen_optab = init_optab (UNKNOWN);
5300 sdiv_optab = init_optab (DIV);
5301 sdivv_optab = init_optabv (DIV);
5302 sdivmod_optab = init_optab (UNKNOWN);
5303 udiv_optab = init_optab (UDIV);
5304 udivmod_optab = init_optab (UNKNOWN);
5305 smod_optab = init_optab (MOD);
5306 umod_optab = init_optab (UMOD);
5307 fmod_optab = init_optab (UNKNOWN);
5308 remainder_optab = init_optab (UNKNOWN);
5309 ftrunc_optab = init_optab (UNKNOWN);
5310 and_optab = init_optab (AND);
5311 ior_optab = init_optab (IOR);
5312 xor_optab = init_optab (XOR);
5313 ashl_optab = init_optab (ASHIFT);
5314 ashr_optab = init_optab (ASHIFTRT);
5315 lshr_optab = init_optab (LSHIFTRT);
5316 rotl_optab = init_optab (ROTATE);
5317 rotr_optab = init_optab (ROTATERT);
5318 smin_optab = init_optab (SMIN);
5319 smax_optab = init_optab (SMAX);
5320 umin_optab = init_optab (UMIN);
5321 umax_optab = init_optab (UMAX);
5322 pow_optab = init_optab (UNKNOWN);
5323 atan2_optab = init_optab (UNKNOWN);
5325 /* These three have codes assigned exclusively for the sake of
5326 have_insn_for. */
5327 mov_optab = init_optab (SET);
5328 movstrict_optab = init_optab (STRICT_LOW_PART);
5329 cmp_optab = init_optab (COMPARE);
5331 ucmp_optab = init_optab (UNKNOWN);
5332 tst_optab = init_optab (UNKNOWN);
5334 eq_optab = init_optab (EQ);
5335 ne_optab = init_optab (NE);
5336 gt_optab = init_optab (GT);
5337 ge_optab = init_optab (GE);
5338 lt_optab = init_optab (LT);
5339 le_optab = init_optab (LE);
5340 unord_optab = init_optab (UNORDERED);
5342 neg_optab = init_optab (NEG);
5343 negv_optab = init_optabv (NEG);
5344 abs_optab = init_optab (ABS);
5345 absv_optab = init_optabv (ABS);
5346 addcc_optab = init_optab (UNKNOWN);
5347 one_cmpl_optab = init_optab (NOT);
5348 bswap_optab = init_optab (BSWAP);
5349 ffs_optab = init_optab (FFS);
5350 clz_optab = init_optab (CLZ);
5351 ctz_optab = init_optab (CTZ);
5352 popcount_optab = init_optab (POPCOUNT);
5353 parity_optab = init_optab (PARITY);
5354 sqrt_optab = init_optab (SQRT);
5355 floor_optab = init_optab (UNKNOWN);
5356 ceil_optab = init_optab (UNKNOWN);
5357 round_optab = init_optab (UNKNOWN);
5358 btrunc_optab = init_optab (UNKNOWN);
5359 nearbyint_optab = init_optab (UNKNOWN);
5360 rint_optab = init_optab (UNKNOWN);
5361 sincos_optab = init_optab (UNKNOWN);
5362 sin_optab = init_optab (UNKNOWN);
5363 asin_optab = init_optab (UNKNOWN);
5364 cos_optab = init_optab (UNKNOWN);
5365 acos_optab = init_optab (UNKNOWN);
5366 exp_optab = init_optab (UNKNOWN);
5367 exp10_optab = init_optab (UNKNOWN);
5368 exp2_optab = init_optab (UNKNOWN);
5369 expm1_optab = init_optab (UNKNOWN);
5370 ldexp_optab = init_optab (UNKNOWN);
5371 logb_optab = init_optab (UNKNOWN);
5372 ilogb_optab = init_optab (UNKNOWN);
5373 log_optab = init_optab (UNKNOWN);
5374 log10_optab = init_optab (UNKNOWN);
5375 log2_optab = init_optab (UNKNOWN);
5376 log1p_optab = init_optab (UNKNOWN);
5377 tan_optab = init_optab (UNKNOWN);
5378 atan_optab = init_optab (UNKNOWN);
5379 copysign_optab = init_optab (UNKNOWN);
5381 isinf_optab = init_optab (UNKNOWN);
5383 strlen_optab = init_optab (UNKNOWN);
5384 cbranch_optab = init_optab (UNKNOWN);
5385 cmov_optab = init_optab (UNKNOWN);
5386 cstore_optab = init_optab (UNKNOWN);
5387 push_optab = init_optab (UNKNOWN);
5389 reduc_smax_optab = init_optab (UNKNOWN);
5390 reduc_umax_optab = init_optab (UNKNOWN);
5391 reduc_smin_optab = init_optab (UNKNOWN);
5392 reduc_umin_optab = init_optab (UNKNOWN);
5393 reduc_splus_optab = init_optab (UNKNOWN);
5394 reduc_uplus_optab = init_optab (UNKNOWN);
5396 ssum_widen_optab = init_optab (UNKNOWN);
5397 usum_widen_optab = init_optab (UNKNOWN);
5398 sdot_prod_optab = init_optab (UNKNOWN);
5399 udot_prod_optab = init_optab (UNKNOWN);
5401 vec_extract_optab = init_optab (UNKNOWN);
5402 vec_extract_even_optab = init_optab (UNKNOWN);
5403 vec_extract_odd_optab = init_optab (UNKNOWN);
5404 vec_interleave_high_optab = init_optab (UNKNOWN);
5405 vec_interleave_low_optab = init_optab (UNKNOWN);
5406 vec_set_optab = init_optab (UNKNOWN);
5407 vec_init_optab = init_optab (UNKNOWN);
5408 vec_shl_optab = init_optab (UNKNOWN);
5409 vec_shr_optab = init_optab (UNKNOWN);
5410 vec_realign_load_optab = init_optab (UNKNOWN);
5411 movmisalign_optab = init_optab (UNKNOWN);
5412 vec_widen_umult_hi_optab = init_optab (UNKNOWN);
5413 vec_widen_umult_lo_optab = init_optab (UNKNOWN);
5414 vec_widen_smult_hi_optab = init_optab (UNKNOWN);
5415 vec_widen_smult_lo_optab = init_optab (UNKNOWN);
5416 vec_unpacks_hi_optab = init_optab (UNKNOWN);
5417 vec_unpacks_lo_optab = init_optab (UNKNOWN);
5418 vec_unpacku_hi_optab = init_optab (UNKNOWN);
5419 vec_unpacku_lo_optab = init_optab (UNKNOWN);
5420 vec_pack_mod_optab = init_optab (UNKNOWN);
5421 vec_pack_usat_optab = init_optab (UNKNOWN);
5422 vec_pack_ssat_optab = init_optab (UNKNOWN);
5424 powi_optab = init_optab (UNKNOWN);
5426 /* Conversions. */
5427 sext_optab = init_convert_optab (SIGN_EXTEND);
5428 zext_optab = init_convert_optab (ZERO_EXTEND);
5429 trunc_optab = init_convert_optab (TRUNCATE);
5430 sfix_optab = init_convert_optab (FIX);
5431 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5432 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5433 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5434 sfloat_optab = init_convert_optab (FLOAT);
5435 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5436 lrint_optab = init_convert_optab (UNKNOWN);
5437 lround_optab = init_convert_optab (UNKNOWN);
5438 lfloor_optab = init_convert_optab (UNKNOWN);
5439 lceil_optab = init_convert_optab (UNKNOWN);
5441 for (i = 0; i < NUM_MACHINE_MODES; i++)
5443 movmem_optab[i] = CODE_FOR_nothing;
5444 cmpstr_optab[i] = CODE_FOR_nothing;
5445 cmpstrn_optab[i] = CODE_FOR_nothing;
5446 cmpmem_optab[i] = CODE_FOR_nothing;
5447 setmem_optab[i] = CODE_FOR_nothing;
5449 sync_add_optab[i] = CODE_FOR_nothing;
5450 sync_sub_optab[i] = CODE_FOR_nothing;
5451 sync_ior_optab[i] = CODE_FOR_nothing;
5452 sync_and_optab[i] = CODE_FOR_nothing;
5453 sync_xor_optab[i] = CODE_FOR_nothing;
5454 sync_nand_optab[i] = CODE_FOR_nothing;
5455 sync_old_add_optab[i] = CODE_FOR_nothing;
5456 sync_old_sub_optab[i] = CODE_FOR_nothing;
5457 sync_old_ior_optab[i] = CODE_FOR_nothing;
5458 sync_old_and_optab[i] = CODE_FOR_nothing;
5459 sync_old_xor_optab[i] = CODE_FOR_nothing;
5460 sync_old_nand_optab[i] = CODE_FOR_nothing;
5461 sync_new_add_optab[i] = CODE_FOR_nothing;
5462 sync_new_sub_optab[i] = CODE_FOR_nothing;
5463 sync_new_ior_optab[i] = CODE_FOR_nothing;
5464 sync_new_and_optab[i] = CODE_FOR_nothing;
5465 sync_new_xor_optab[i] = CODE_FOR_nothing;
5466 sync_new_nand_optab[i] = CODE_FOR_nothing;
5467 sync_compare_and_swap[i] = CODE_FOR_nothing;
5468 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5469 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5470 sync_lock_release[i] = CODE_FOR_nothing;
5472 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5475 /* Fill in the optabs with the insns we support. */
5476 init_all_optabs ();
5478 /* Initialize the optabs with the names of the library functions. */
5479 init_integral_libfuncs (add_optab, "add", '3');
5480 init_floating_libfuncs (add_optab, "add", '3');
5481 init_integral_libfuncs (addv_optab, "addv", '3');
5482 init_floating_libfuncs (addv_optab, "add", '3');
5483 init_integral_libfuncs (sub_optab, "sub", '3');
5484 init_floating_libfuncs (sub_optab, "sub", '3');
5485 init_integral_libfuncs (subv_optab, "subv", '3');
5486 init_floating_libfuncs (subv_optab, "sub", '3');
5487 init_integral_libfuncs (smul_optab, "mul", '3');
5488 init_floating_libfuncs (smul_optab, "mul", '3');
5489 init_integral_libfuncs (smulv_optab, "mulv", '3');
5490 init_floating_libfuncs (smulv_optab, "mul", '3');
5491 init_integral_libfuncs (sdiv_optab, "div", '3');
5492 init_floating_libfuncs (sdiv_optab, "div", '3');
5493 init_integral_libfuncs (sdivv_optab, "divv", '3');
5494 init_integral_libfuncs (udiv_optab, "udiv", '3');
5495 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5496 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5497 init_integral_libfuncs (smod_optab, "mod", '3');
5498 init_integral_libfuncs (umod_optab, "umod", '3');
5499 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5500 init_integral_libfuncs (and_optab, "and", '3');
5501 init_integral_libfuncs (ior_optab, "ior", '3');
5502 init_integral_libfuncs (xor_optab, "xor", '3');
5503 init_integral_libfuncs (ashl_optab, "ashl", '3');
5504 init_integral_libfuncs (ashr_optab, "ashr", '3');
5505 init_integral_libfuncs (lshr_optab, "lshr", '3');
5506 init_integral_libfuncs (smin_optab, "min", '3');
5507 init_floating_libfuncs (smin_optab, "min", '3');
5508 init_integral_libfuncs (smax_optab, "max", '3');
5509 init_floating_libfuncs (smax_optab, "max", '3');
5510 init_integral_libfuncs (umin_optab, "umin", '3');
5511 init_integral_libfuncs (umax_optab, "umax", '3');
5512 init_integral_libfuncs (neg_optab, "neg", '2');
5513 init_floating_libfuncs (neg_optab, "neg", '2');
5514 init_integral_libfuncs (negv_optab, "negv", '2');
5515 init_floating_libfuncs (negv_optab, "neg", '2');
5516 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5517 init_integral_libfuncs (ffs_optab, "ffs", '2');
5518 init_integral_libfuncs (clz_optab, "clz", '2');
5519 init_integral_libfuncs (ctz_optab, "ctz", '2');
5520 init_integral_libfuncs (popcount_optab, "popcount", '2');
5521 init_integral_libfuncs (parity_optab, "parity", '2');
5523 /* Comparison libcalls for integers MUST come in pairs,
5524 signed/unsigned. */
5525 init_integral_libfuncs (cmp_optab, "cmp", '2');
5526 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5527 init_floating_libfuncs (cmp_optab, "cmp", '2');
5529 /* EQ etc are floating point only. */
5530 init_floating_libfuncs (eq_optab, "eq", '2');
5531 init_floating_libfuncs (ne_optab, "ne", '2');
5532 init_floating_libfuncs (gt_optab, "gt", '2');
5533 init_floating_libfuncs (ge_optab, "ge", '2');
5534 init_floating_libfuncs (lt_optab, "lt", '2');
5535 init_floating_libfuncs (le_optab, "le", '2');
5536 init_floating_libfuncs (unord_optab, "unord", '2');
5538 init_floating_libfuncs (powi_optab, "powi", '2');
5540 /* Conversions. */
5541 init_interclass_conv_libfuncs (sfloat_optab, "float",
5542 MODE_INT, MODE_FLOAT);
5543 init_interclass_conv_libfuncs (sfloat_optab, "float",
5544 MODE_INT, MODE_DECIMAL_FLOAT);
5545 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5546 MODE_INT, MODE_FLOAT);
5547 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5548 MODE_INT, MODE_DECIMAL_FLOAT);
5549 init_interclass_conv_libfuncs (sfix_optab, "fix",
5550 MODE_FLOAT, MODE_INT);
5551 init_interclass_conv_libfuncs (sfix_optab, "fix",
5552 MODE_DECIMAL_FLOAT, MODE_INT);
5553 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5554 MODE_FLOAT, MODE_INT);
5555 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5556 MODE_DECIMAL_FLOAT, MODE_INT);
5557 init_interclass_conv_libfuncs (ufloat_optab, "floatuns",
5558 MODE_INT, MODE_DECIMAL_FLOAT);
5559 init_interclass_conv_libfuncs (lrint_optab, "lrint",
5560 MODE_INT, MODE_FLOAT);
5561 init_interclass_conv_libfuncs (lround_optab, "lround",
5562 MODE_INT, MODE_FLOAT);
5563 init_interclass_conv_libfuncs (lfloor_optab, "lfloor",
5564 MODE_INT, MODE_FLOAT);
5565 init_interclass_conv_libfuncs (lceil_optab, "lceil",
5566 MODE_INT, MODE_FLOAT);
5568 /* sext_optab is also used for FLOAT_EXTEND. */
5569 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5570 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true);
5571 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5572 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5573 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5574 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false);
5575 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5576 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT);
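/* For example, the initializations above yield libcall names such as
   __extendsfdf2 and __truncdfsf2 for the binary floating modes; the
   decimal and mixed binary/decimal conversions get analogous names,
   subject to renaming by the target hook invoked below.  */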
5578 /* Explicitly initialize the bswap libfuncs since we need them to be
5579 valid for things other than word_mode. */
5580 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
5581 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
5583 /* Use cabs for double complex abs, since systems generally have cabs.
5584 Don't define any libcall for float complex, so that cabs will be used. */
5585 if (complex_double_type_node)
5586 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5587 = init_one_libfunc ("cabs");
5589 /* The ffs function operates on `int'. */
5590 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5591 = init_one_libfunc ("ffs");
5593 abort_libfunc = init_one_libfunc ("abort");
5594 memcpy_libfunc = init_one_libfunc ("memcpy");
5595 memmove_libfunc = init_one_libfunc ("memmove");
5596 memcmp_libfunc = init_one_libfunc ("memcmp");
5597 memset_libfunc = init_one_libfunc ("memset");
5598 setbits_libfunc = init_one_libfunc ("__setbits");
5600 #ifndef DONT_USE_BUILTIN_SETJMP
5601 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5602 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5603 #else
5604 setjmp_libfunc = init_one_libfunc ("setjmp");
5605 longjmp_libfunc = init_one_libfunc ("longjmp");
5606 #endif
5607 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5608 unwind_sjlj_unregister_libfunc
5609 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5611 /* For function entry/exit instrumentation. */
5612 profile_function_entry_libfunc
5613 = init_one_libfunc ("__cyg_profile_func_enter");
5614 profile_function_exit_libfunc
5615 = init_one_libfunc ("__cyg_profile_func_exit");
5617 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5619 if (HAVE_conditional_trap)
5620 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5622 /* Allow the target to add more libcalls or rename some, etc. */
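/* At this point the tables hold the generic names; targets typically use
   this hook to substitute, e.g., software floating-point or OS-specific
   routine names for the defaults registered above.  */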
5623 targetm.init_libfuncs ();
5626 #ifdef DEBUG
5628 /* Print information about the current contents of the optabs on
5629 STDERR. */
5631 static void
5632 debug_optab_libfuncs (void)
5634 int i;
5635 int j;
5636 int k;
5638 /* Dump the arithmetic optabs. */
5639 for (i = 0; i != (int) OTI_MAX; i++)
5640 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5642 optab o;
5643 struct optab_handlers *h;
5645 o = optab_table[i];
5646 h = &o->handlers[j];
5647 if (h->libfunc)
5649 gcc_assert (GET_CODE (h->libfunc) = SYMBOL_REF);
5650 fprintf (stderr, "%s\t%s:\t%s\n",
5651 GET_RTX_NAME (o->code),
5652 GET_MODE_NAME (j),
5653 XSTR (h->libfunc, 0));
5657 /* Dump the conversion optabs. */
5658 for (i = 0; i < (int) COI_MAX; ++i)
5659 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5660 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5662 convert_optab o;
5663 struct optab_handlers *h;
5665 o = &convert_optab_table[i];
5666 h = &o->handlers[j][k];
5667 if (h->libfunc)
5669 gcc_assert (GET_CODE (h->libfunc) = SYMBOL_REF);
5670 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5671 GET_RTX_NAME (o->code),
5672 GET_MODE_NAME (j),
5673 GET_MODE_NAME (k),
5674 XSTR (h->libfunc, 0));
5679 #endif /* DEBUG */
5682 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5683 CODE. Return 0 on failure. */
5685 rtx
5686 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5687 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5689 enum machine_mode mode = GET_MODE (op1);
5690 enum insn_code icode;
5691 rtx insn;
5693 if (!HAVE_conditional_trap)
5694 return 0;
5696 if (mode == VOIDmode)
5697 return 0;
5699 icode = cmp_optab->handlers[(int) mode].insn_code;
5700 if (icode == CODE_FOR_nothing)
5701 return 0;
5703 start_sequence ();
5704 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5705 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5706 if (!op1 || !op2)
5708 end_sequence ();
5709 return 0;
5711 emit_insn (GEN_FCN (icode) (op1, op2));
5713 PUT_CODE (trap_rtx, code);
5714 gcc_assert (HAVE_conditional_trap);
5715 insn = gen_conditional_trap (trap_rtx, tcode);
5716 if (insn)
5718 emit_insn (insn);
5719 insn = get_insns ();
5721 end_sequence ();
5723 return insn;
5726 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5727 or unsigned operation code. */
5729 static enum rtx_code
5730 get_rtx_code (enum tree_code tcode, bool unsignedp)
5732 enum rtx_code code;
5733 switch (tcode)
5735 case EQ_EXPR:
5736 code = EQ;
5737 break;
5738 case NE_EXPR:
5739 code = NE;
5740 break;
5741 case LT_EXPR:
5742 code = unsignedp ? LTU : LT;
5743 break;
5744 case LE_EXPR:
5745 code = unsignedp ? LEU : LE;
5746 break;
5747 case GT_EXPR:
5748 code = unsignedp ? GTU : GT;
5749 break;
5750 case GE_EXPR:
5751 code = unsignedp ? GEU : GE;
5752 break;
5754 case UNORDERED_EXPR:
5755 code = UNORDERED;
5756 break;
5757 case ORDERED_EXPR:
5758 code = ORDERED;
5759 break;
5760 case UNLT_EXPR:
5761 code = UNLT;
5762 break;
5763 case UNLE_EXPR:
5764 code = UNLE;
5765 break;
5766 case UNGT_EXPR:
5767 code = UNGT;
5768 break;
5769 case UNGE_EXPR:
5770 code = UNGE;
5771 break;
5772 case UNEQ_EXPR:
5773 code = UNEQ;
5774 break;
5775 case LTGT_EXPR:
5776 code = LTGT;
5777 break;
5779 default:
5780 gcc_unreachable ();
5782 return code;
5785 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5786 unsigned operators. Do not generate a compare instruction. */
5788 static rtx
5789 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5791 enum rtx_code rcode;
5792 tree t_op0, t_op1;
5793 rtx rtx_op0, rtx_op1;
5795 /* This is unlikely. While generating VEC_COND_EXPR, the auto-vectorizer
5796 ensures that the condition is a relational operation. */
5797 gcc_assert (COMPARISON_CLASS_P (cond));
5799 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5800 t_op0 = TREE_OPERAND (cond, 0);
5801 t_op1 = TREE_OPERAND (cond, 1);
5803 /* Expand operands. */
5804 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5805 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
5807 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5808 && GET_MODE (rtx_op0) != VOIDmode)
5809 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5811 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5812 && GET_MODE (rtx_op1) != VOIDmode)
5813 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5815 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5818 /* Return insn code for VEC_COND_EXPR EXPR. */
5820 static inline enum insn_code
5821 get_vcond_icode (tree expr, enum machine_mode mode)
5823 enum insn_code icode = CODE_FOR_nothing;
5825 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5826 icode = vcondu_gen_code[mode];
5827 else
5828 icode = vcond_gen_code[mode];
5829 return icode;
5832 /* Return TRUE iff appropriate vector insns are available
5833 for the vector cond expr EXPR in VMODE mode. */
5835 bool
5836 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5838 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5839 return false;
5840 return true;
5843 /* Generate insns for VEC_COND_EXPR. */
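/* A sketch of the vcond<mode> operand layout as this expander uses it:
   operand 0 is the destination, operands 1 and 2 are the "then" and
   "else" values, operand 3 is the comparison operator, and operands 4
   and 5 are the two values being compared; vector_compare_rtx above
   checks its operands against slots 4 and 5 accordingly.  */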
5845 rtx
5846 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5848 enum insn_code icode;
5849 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5850 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5851 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5853 icode = get_vcond_icode (vec_cond_expr, mode);
5854 if (icode == CODE_FOR_nothing)
5855 return 0;
5857 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5858 target = gen_reg_rtx (mode);
5860 /* Get comparison rtx. First expand both cond expr operands. */
5861 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5862 unsignedp, icode);
5863 cc_op0 = XEXP (comparison, 0);
5864 cc_op1 = XEXP (comparison, 1);
5865 /* Expand both operands and force them in reg, if required. */
5866 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5867 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5868 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5869 && mode != VOIDmode)
5870 rtx_op1 = force_reg (mode, rtx_op1);
5872 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5873 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5874 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5875 && mode != VOIDmode)
5876 rtx_op2 = force_reg (mode, rtx_op2);
5878 /* Emit instruction! */
5879 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5880 comparison, cc_op0, cc_op1));
5882 return target;
5886 /* This is an internal subroutine of the other compare_and_swap expanders.
5887 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5888 operation. TARGET is an optional place to store the value result of
5889 the operation. ICODE is the particular instruction to expand. Return
5890 the result of the operation. */
5892 static rtx
5893 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5894 rtx target, enum insn_code icode)
5896 enum machine_mode mode = GET_MODE (mem);
5897 rtx insn;
5899 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5900 target = gen_reg_rtx (mode);
5902 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5903 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5904 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5905 old_val = force_reg (mode, old_val);
5907 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5908 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5909 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5910 new_val = force_reg (mode, new_val);
5912 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5913 if (insn == NULL_RTX)
5914 return NULL_RTX;
5915 emit_insn (insn);
5917 return target;
5920 /* Expand a compare-and-swap operation and return its value. */
5922 rtx
5923 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5925 enum machine_mode mode = GET_MODE (mem);
5926 enum insn_code icode = sync_compare_and_swap[mode];
5928 if (icode == CODE_FOR_nothing)
5929 return NULL_RTX;
5931 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
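/* For reference, this is the expander behind calls such as (a sketch,
   assuming the usual __sync interface):

     old = __sync_val_compare_and_swap (p, expected, desired);

   where the value *P held before the operation is returned whether or
   not the swap actually took place.  */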
5934 /* Expand a compare-and-swap operation and store true into the result if
5935 the operation was successful and false otherwise. Return the result.
5936 Unlike other routines, TARGET is not optional. */
5938 rtx
5939 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5941 enum machine_mode mode = GET_MODE (mem);
5942 enum insn_code icode;
5943 rtx subtarget, label0, label1;
5945 /* If the target supports a compare-and-swap pattern that simultaneously
5946 sets some flag for success, then use it. Otherwise use the regular
5947 compare-and-swap and follow that immediately with a compare insn. */
5948 icode = sync_compare_and_swap_cc[mode];
5949 switch (icode)
5951 default:
5952 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5953 NULL_RTX, icode);
5954 if (subtarget != NULL_RTX)
5955 break;
5957 /* FALLTHRU */
5958 case CODE_FOR_nothing:
5959 icode = sync_compare_and_swap[mode];
5960 if (icode == CODE_FOR_nothing)
5961 return NULL_RTX;
5963 /* Ensure that if old_val == mem, we're not comparing
5964 against an old value. */
5965 if (MEM_P (old_val))
5966 old_val = force_reg (mode, old_val);
5968 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5969 NULL_RTX, icode);
5970 if (subtarget == NULL_RTX)
5971 return NULL_RTX;
5973 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5976 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5977 setcc instruction from the beginning. We don't work too hard here,
5978 but it's nice to not be stupid about initial code gen either. */
5979 if (STORE_FLAG_VALUE == 1)
5981 icode = setcc_gen_code[EQ];
5982 if (icode != CODE_FOR_nothing)
5984 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5985 rtx insn;
5987 subtarget = target;
5988 if (!insn_data[icode].operand[0].predicate (target, cmode))
5989 subtarget = gen_reg_rtx (cmode);
5991 insn = GEN_FCN (icode) (subtarget);
5992 if (insn)
5994 emit_insn (insn);
5995 if (GET_MODE (target) != GET_MODE (subtarget))
5997 convert_move (target, subtarget, 1);
5998 subtarget = target;
6000 return subtarget;
6005 /* Without an appropriate setcc instruction, use a set of branches to
6006 get 1 and 0 stored into target. Presumably if the target has a
6007 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
6009 label0 = gen_label_rtx ();
6010 label1 = gen_label_rtx ();
6012 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
6013 emit_move_insn (target, const0_rtx);
6014 emit_jump_insn (gen_jump (label1));
6015 emit_barrier ();
6016 emit_label (label0);
6017 emit_move_insn (target, const1_rtx);
6018 emit_label (label1);
6020 return target;
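/* The boolean variant above backs __sync_bool_compare_and_swap, whose
   result only says whether the swap happened; when the target lacks a
   CC-setting compare-and-swap pattern and a usable setcc, the branch
   sequence at the end materializes the 0/1 result explicitly.  */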
6023 /* This is a helper function for the other atomic operations. This function
6024 emits a loop that contains SEQ that iterates until a compare-and-swap
6025 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6026 a set of instructions that takes a value from OLD_REG as an input and
6027 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6028 set to the current contents of MEM. After SEQ, a compare-and-swap will
6029 attempt to update MEM with NEW_REG. The function returns true when the
6030 loop was generated successfully. */
6032 static bool
6033 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
6035 enum machine_mode mode = GET_MODE (mem);
6036 enum insn_code icode;
6037 rtx label, cmp_reg, subtarget;
6039 /* The loop we want to generate looks like
6041 cmp_reg = mem;
6042 label:
6043 old_reg = cmp_reg;
6044 seq;
6045 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
6046 if (cmp_reg != old_reg)
6047 goto label;
6049 Note that we only do the plain load from memory once. Subsequent
6050 iterations use the value loaded by the compare-and-swap pattern. */
6052 label = gen_label_rtx ();
6053 cmp_reg = gen_reg_rtx (mode);
6055 emit_move_insn (cmp_reg, mem);
6056 emit_label (label);
6057 emit_move_insn (old_reg, cmp_reg);
6058 if (seq)
6059 emit_insn (seq);
6061 /* If the target supports a compare-and-swap pattern that simultaneously
6062 sets some flag for success, then use it. Otherwise use the regular
6063 compare-and-swap and follow that immediately with a compare insn. */
6064 icode = sync_compare_and_swap_cc[mode];
6065 switch (icode)
6067 default:
6068 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6069 cmp_reg, icode);
6070 if (subtarget != NULL_RTX)
6072 gcc_assert (subtarget == cmp_reg);
6073 break;
6076 /* FALLTHRU */
6077 case CODE_FOR_nothing:
6078 icode = sync_compare_and_swap[mode];
6079 if (icode == CODE_FOR_nothing)
6080 return false;
6082 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
6083 cmp_reg, icode);
6084 if (subtarget == NULL_RTX)
6085 return false;
6086 if (subtarget != cmp_reg)
6087 emit_move_insn (cmp_reg, subtarget);
6089 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
6092 /* ??? Mark this jump predicted not taken? */
6093 emit_jump_insn (bcc_gen_fctn[NE] (label));
6095 return true;
6098 /* This function generates the atomic operation MEM CODE= VAL. In this
6099 case, we do not care about any resulting value. Returns NULL if we
6100 cannot generate the operation. */
6102 rtx
6103 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
6105 enum machine_mode mode = GET_MODE (mem);
6106 enum insn_code icode;
6107 rtx insn;
6109 /* Look to see if the target supports the operation directly. */
6110 switch (code)
6112 case PLUS:
6113 icode = sync_add_optab[mode];
6114 break;
6115 case IOR:
6116 icode = sync_ior_optab[mode];
6117 break;
6118 case XOR:
6119 icode = sync_xor_optab[mode];
6120 break;
6121 case AND:
6122 icode = sync_and_optab[mode];
6123 break;
6124 case NOT:
6125 icode = sync_nand_optab[mode];
6126 break;
6128 case MINUS:
6129 icode = sync_sub_optab[mode];
6130 if (icode == CODE_FOR_nothing)
6132 icode = sync_add_optab[mode];
6133 if (icode != CODE_FOR_nothing)
6135 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6136 code = PLUS;
6139 break;
6141 default:
6142 gcc_unreachable ();
6145 /* Generate the direct operation, if present. */
6146 if (icode != CODE_FOR_nothing)
6148 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6149 val = convert_modes (mode, GET_MODE (val), val, 1);
6150 if (!insn_data[icode].operand[1].predicate (val, mode))
6151 val = force_reg (mode, val);
6153 insn = GEN_FCN (icode) (mem, val);
6154 if (insn)
6156 emit_insn (insn);
6157 return const0_rtx;
6161 /* Failing that, generate a compare-and-swap loop in which we perform the
6162 operation with normal arithmetic instructions. */
6163 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6165 rtx t0 = gen_reg_rtx (mode), t1;
6167 start_sequence ();
6169 t1 = t0;
6170 if (code == NOT)
6172 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6173 code = AND;
6175 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6176 true, OPTAB_LIB_WIDEN);
6178 insn = get_insns ();
6179 end_sequence ();
6181 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6182 return const0_rtx;
6185 return NULL_RTX;
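/* This is the "result ignored" path used when, for instance, a call like

     __sync_fetch_and_add (&counter, 1);

   discards its return value: a plain sync_add pattern (or, failing that,
   a compare-and-swap loop) suffices, and no old/new value is kept.  */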
6188 /* This function generates the atomic operation MEM CODE= VAL. In this
6189 case, we do care about the resulting value: if AFTER is true then
6190 return the value MEM holds after the operation, if AFTER is false
6191 then return the value MEM holds before the operation. TARGET is an
6192 optional place for the result value to be stored. */
6194 rtx
6195 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
6196 bool after, rtx target)
6198 enum machine_mode mode = GET_MODE (mem);
6199 enum insn_code old_code, new_code, icode;
6200 bool compensate;
6201 rtx insn;
6203 /* Look to see if the target supports the operation directly. */
6204 switch (code)
6206 case PLUS:
6207 old_code = sync_old_add_optab[mode];
6208 new_code = sync_new_add_optab[mode];
6209 break;
6210 case IOR:
6211 old_code = sync_old_ior_optab[mode];
6212 new_code = sync_new_ior_optab[mode];
6213 break;
6214 case XOR:
6215 old_code = sync_old_xor_optab[mode];
6216 new_code = sync_new_xor_optab[mode];
6217 break;
6218 case AND:
6219 old_code = sync_old_and_optab[mode];
6220 new_code = sync_new_and_optab[mode];
6221 break;
6222 case NOT:
6223 old_code = sync_old_nand_optab[mode];
6224 new_code = sync_new_nand_optab[mode];
6225 break;
6227 case MINUS:
6228 old_code = sync_old_sub_optab[mode];
6229 new_code = sync_new_sub_optab[mode];
6230 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
6232 old_code = sync_old_add_optab[mode];
6233 new_code = sync_new_add_optab[mode];
6234 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
6236 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6237 code = PLUS;
6240 break;
6242 default:
6243 gcc_unreachable ();
6246 /* If the target supports the proper new/old operation, great. But
6247 if we only support the opposite old/new operation, check to see if we
6248 can compensate. In the case in which the old value is supported, then
6249 we can always perform the operation again with normal arithmetic. In
6250 the case in which the new value is supported, then we can only handle
6251 this in the case the operation is reversible. */
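/* Worked example of the compensation: if only sync_new_add is available
   but the caller wants the value MEM held before the addition, we emit
   the new-value pattern and then subtract VAL from its result; with only
   an old-value pattern available, we can always redo the operation in
   registers to recover the new value.  */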
6252 compensate = false;
6253 if (after)
6255 icode = new_code;
6256 if (icode == CODE_FOR_nothing)
6258 icode = old_code;
6259 if (icode != CODE_FOR_nothing)
6260 compensate = true;
6263 else
6265 icode = old_code;
6266 if (icode == CODE_FOR_nothing
6267 && (code == PLUS || code == MINUS || code == XOR))
6269 icode = new_code;
6270 if (icode != CODE_FOR_nothing)
6271 compensate = true;
6275 /* If we found something supported, great. */
6276 if (icode != CODE_FOR_nothing)
6278 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6279 target = gen_reg_rtx (mode);
6281 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6282 val = convert_modes (mode, GET_MODE (val), val, 1);
6283 if (!insn_data[icode].operand[2].predicate (val, mode))
6284 val = force_reg (mode, val);
6286 insn = GEN_FCN (icode) (target, mem, val);
6287 if (insn)
6289 emit_insn (insn);
6291 /* If we need to compensate for using an operation with the
6292 wrong return value, do so now. */
6293 if (compensate)
6295 if (!after)
6297 if (code == PLUS)
6298 code = MINUS;
6299 else if (code == MINUS)
6300 code = PLUS;
6303 if (code == NOT)
6304 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
6305 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
6306 true, OPTAB_LIB_WIDEN);
6309 return target;
6313 /* Failing that, generate a compare-and-swap loop in which we perform the
6314 operation with normal arithmetic instructions. */
6315 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6317 rtx t0 = gen_reg_rtx (mode), t1;
6319 if (!target || !register_operand (target, mode))
6320 target = gen_reg_rtx (mode);
6322 start_sequence ();
6324 if (!after)
6325 emit_move_insn (target, t0);
6326 t1 = t0;
6327 if (code == NOT)
6329 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6330 code = AND;
6332 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6333 true, OPTAB_LIB_WIDEN);
6334 if (after)
6335 emit_move_insn (target, t1);
6337 insn = get_insns ();
6338 end_sequence ();
6340 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6341 return target;
6344 return NULL_RTX;
6347 /* This function expands a test-and-set operation. Ideally we atomically
6348 store VAL in MEM and return the previous value in MEM. Some targets
6349 may not support this operation and only support VAL with the constant 1;
6350 in this case the return value will be 0/1, but the exact value
6351 stored in MEM is target-defined. TARGET is an optional place to stick
6352 the return value. */
6354 rtx
6355 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6357 enum machine_mode mode = GET_MODE (mem);
6358 enum insn_code icode;
6359 rtx insn;
6361 /* If the target supports the test-and-set directly, great. */
6362 icode = sync_lock_test_and_set[mode];
6363 if (icode != CODE_FOR_nothing)
6365 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6366 target = gen_reg_rtx (mode);
6368 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6369 val = convert_modes (mode, GET_MODE (val), val, 1);
6370 if (!insn_data[icode].operand[2].predicate (val, mode))
6371 val = force_reg (mode, val);
6373 insn = GEN_FCN (icode) (target, mem, val);
6374 if (insn)
6376 emit_insn (insn);
6377 return target;
6381 /* Otherwise, use a compare-and-swap loop for the exchange. */
6382 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6384 if (!target || !register_operand (target, mode))
6385 target = gen_reg_rtx (mode);
6386 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6387 val = convert_modes (mode, GET_MODE (val), val, 1);
6388 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6389 return target;
6392 return NULL_RTX;
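/* A typical consumer of the routine above, sketched at the C level, is a
   spinlock acquire:

     while (__sync_lock_test_and_set (&lock, 1))
       continue;   -- spin until the previous value was 0

   which relies only on the 0/1 return behavior documented above.  */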
6395 #include "gt-optabs.h"