/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "insn-config.h"

#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "function.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "libfuncs.h"
#include "recog.h"
#include "reload.h"
#include "ggc.h"
#include "real.h"
#include "basic-block.h"
#include "target.h"

/* Each optab contains info on how this target machine
   can perform a particular operation
   for all sizes and kinds of operands.

   The operation to be performed is often specified
   by passing one of these optabs as an argument.

   See expr.h for documentation of these optabs.  */

optab optab_table[OTI_MAX];

rtx libfunc_table[LTI_MAX];

/* Tables of patterns for converting one mode to another.  */
convert_optab convert_optab_table[COI_MAX];

/* Contains the optab used for each rtx code.  */
optab code_to_optab[NUM_RTX_CODE + 1];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the gen_function to make a branch to test that condition.  */

rtxfun bcc_gen_fctn[NUM_RTX_CODE];

/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
   gives the insn code to make a store-condition insn
   to test that condition.  */

enum insn_code setcc_gen_code[NUM_RTX_CODE];

#ifdef HAVE_conditional_move
/* Indexed by the machine mode, gives the insn code to make a conditional
   move insn.  This is not indexed by the rtx-code like bcc_gen_fctn and
   setcc_gen_code to cut down on the number of named patterns.  Consider a day
   when a lot more rtx codes are conditional (eg: for the ARM).  */

enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
#endif

/* Indexed by the machine mode, gives the insn code for vector conditional
   operation.  */

enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];

/* The insn generating function can not take an rtx_code argument.
   TRAP_RTX is used as an rtx argument.  Its code is replaced with
   the code to be used in the trap insn and all other fields are ignored.  */
static GTY(()) rtx trap_rtx;

static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
                          int);
static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
                              enum machine_mode *, int *,
                              enum can_compare_purpose);
static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
                                 int *);
static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
static optab new_optab (void);
static convert_optab new_convert_optab (void);
static inline optab init_optab (enum rtx_code);
static inline optab init_optabv (enum rtx_code);
static inline convert_optab init_convert_optab (enum rtx_code);
static void init_libfuncs (optab, int, int, const char *, int);
static void init_integral_libfuncs (optab, const char *, int);
static void init_floating_libfuncs (optab, const char *, int);
static void init_interclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, enum mode_class);
static void init_intraclass_conv_libfuncs (convert_optab, const char *,
                                           enum mode_class, bool);
static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
                                      enum rtx_code, int, rtx);
static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
                                   enum machine_mode *, int *);
static rtx widen_clz (enum machine_mode, rtx, rtx);
static rtx expand_parity (enum machine_mode, rtx, rtx);
static enum rtx_code get_rtx_code (enum tree_code, bool);
static rtx vector_compare_rtx (tree, bool, enum insn_code);

#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
#endif

/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
          || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
        {
          if (reg_set_p (target, insn))
            return 0;

          insn = PREV_INSN (insn);
        }
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}

/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */
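/* For instance, an AND of QImode values that have been widened to SImode
   only needs the low 8 bits of the result, so any garbage left in the upper
   bits of a paradoxical SUBREG is harmless; a right shift, by contrast,
   moves the upper bits down into the live part of the result, so it needs
   a genuine zero- or sign-extension.  */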
static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
               int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}

/* Return the optab used for computing the operation given by
   the tree code, CODE.  This function is not always usable (for
   example, it cannot give complete results for multiplication
   or division) but probably ought to be relied on more widely
   throughout the expander.  */
optab
optab_for_tree_code (enum tree_code code, tree type)
{
  bool trapv;
  switch (code)
    {
    case BIT_AND_EXPR:
      return and_optab;

    case BIT_IOR_EXPR:
      return ior_optab;

    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case BIT_XOR_EXPR:
      return xor_optab;

    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case RDIV_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case LSHIFT_EXPR:
      return ashl_optab;

    case RSHIFT_EXPR:
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case LROTATE_EXPR:
      return rotl_optab;

    case RROTATE_EXPR:
      return rotr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case WIDEN_SUM_EXPR:
      return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;

    case DOT_PROD_EXPR:
      return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;

    case REDUC_MAX_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;

    case REDUC_MIN_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;

    case REDUC_PLUS_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;

    case VEC_LSHIFT_EXPR:
      return vec_shl_optab;

    case VEC_RSHIFT_EXPR:
      return vec_shr_optab;

    default:
      break;
    }

  trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
  switch (code)
    {
    case PLUS_EXPR:
      return trapv ? addv_optab : add_optab;

    case MINUS_EXPR:
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    default:
      return NULL;
    }
}

/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g, when called to expand the following operations, this is how
   the arguments will be initialized:
                                 nops   OP0     OP1     WIDE_OP
   widening-sum                  2      oprnd0  -       oprnd1
   widening-dot-product          3      oprnd0  oprnd1  oprnd2
   widening-mult                 2      oprnd0  oprnd1  -
   type-promotion (vec-unpack)   1      oprnd0  -       -  */
rtx
expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
                           int unsignedp)
{
  tree oprnd0, oprnd1, oprnd2;
  enum machine_mode wmode = 0, tmode0, tmode1 = 0;
  optab widen_pattern_optab;
  int icode;
  enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
  rtx temp;
  rtx pat;
  rtx xop0, xop1, wxop;
  int nops = TREE_CODE_LENGTH (TREE_CODE (exp));

  oprnd0 = TREE_OPERAND (exp, 0);
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
        optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
  icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
  gcc_assert (icode != CODE_FOR_nothing);
  xmode0 = insn_data[icode].operand[1].mode;

  if (nops >= 2)
    {
      oprnd1 = TREE_OPERAND (exp, 1);
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
      xmode1 = insn_data[icode].operand[2].mode;
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    {
      wmode = tmode1;
      wxmode = xmode1;
    }
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = TREE_OPERAND (exp, 2);
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
      wxmode = insn_data[icode].operand[3].mode;
    }

  if (!wide_op)
    wmode = wxmode = insn_data[icode].operand[0].mode;

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
    temp = gen_reg_rtx (wmode);
  else
    temp = target;

  xop0 = op0;
  xop1 = op1;
  wxop = wide_op;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
    xop0 = convert_modes (xmode0,
                          GET_MODE (op0) != VOIDmode
                          ? GET_MODE (op0)
                          : tmode0,
                          xop0, unsignedp);

  if (op1)
    if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
      xop1 = convert_modes (xmode1,
                            GET_MODE (op1) != VOIDmode
                            ? GET_MODE (op1)
                            : tmode1,
                            xop1, unsignedp);

  if (wide_op)
    if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
      wxop = convert_modes (wxmode,
                            GET_MODE (wide_op) != VOIDmode
                            ? GET_MODE (wide_op)
                            : wmode,
                            wxop, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
      && xmode0 != VOIDmode)
    xop0 = copy_to_mode_reg (xmode0, xop0);

  if (op1)
    {
      if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
          && xmode1 != VOIDmode)
        xop1 = copy_to_mode_reg (xmode1, xop1);

      if (wide_op)
        {
          if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
              && wxmode != VOIDmode)
            wxop = copy_to_mode_reg (wxmode, wxop);

          pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
        }
      else
        pat = GEN_FCN (icode) (temp, xop0, xop1);
    }
  else
    {
      if (wide_op)
        {
          if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
              && wxmode != VOIDmode)
            wxop = copy_to_mode_reg (wxmode, wxop);

          pat = GEN_FCN (icode) (temp, xop0, wxop);
        }
      else
        pat = GEN_FCN (icode) (temp, xop0);
    }

  emit_insn (pat);
  return temp;
}

/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
rtx
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
                   rtx op1, rtx op2, rtx target, int unsignedp)
{
  int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;
  rtx temp;
  rtx pat;
  rtx xop0 = op0, xop1 = op1, xop2 = op2;

  gcc_assert (ternary_optab->handlers[(int) mode].insn_code
              != CODE_FOR_nothing);

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    temp = gen_reg_rtx (mode);
  else
    temp = target;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
                          GET_MODE (op0) != VOIDmode
                          ? GET_MODE (op0)
                          : mode,
                          xop0, unsignedp);

  if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
                          GET_MODE (op1) != VOIDmode
                          ? GET_MODE (op1)
                          : mode,
                          xop1, unsignedp);

  if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
    xop2 = convert_modes (mode2,
                          GET_MODE (op2) != VOIDmode
                          ? GET_MODE (op2)
                          : mode,
                          xop2, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (!insn_data[icode].operand[1].predicate (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (!insn_data[icode].operand[2].predicate (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (!insn_data[icode].operand[3].predicate (xop2, mode2)
      && mode2 != VOIDmode)
    xop2 = copy_to_mode_reg (mode2, xop2);

  pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);

  emit_insn (pat);
  return temp;
}

/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

static rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
                       rtx op0, rtx op1, rtx target, int unsignedp,
                       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);

      if (x)
        return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}

/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (enum machine_mode mode, optab binoptab,
                    rtx op0, rtx op1, rtx target, int unsignedp,
                    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
                                 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}

/* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR.  */

rtx
expand_vec_shift_expr (tree vec_shift_expr, rtx target)
{
  enum insn_code icode;
  rtx rtx_op1, rtx_op2;
  enum machine_mode mode1;
  enum machine_mode mode2;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
  tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
  tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
  optab shift_optab;
  rtx pat;

  switch (TREE_CODE (vec_shift_expr))
    {
    case VEC_RSHIFT_EXPR:
      shift_optab = vec_shr_optab;
      break;
    case VEC_LSHIFT_EXPR:
      shift_optab = vec_shl_optab;
      break;
    default:
      gcc_unreachable ();
    }

  icode = (int) shift_optab->handlers[(int) mode].insn_code;
  gcc_assert (icode != CODE_FOR_nothing);

  mode1 = insn_data[icode].operand[1].mode;
  mode2 = insn_data[icode].operand[2].mode;

  rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
      && mode1 != VOIDmode)
    rtx_op1 = force_reg (mode1, rtx_op1);

  rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
      && mode2 != VOIDmode)
    rtx_op2 = force_reg (mode2, rtx_op2);

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, mode))
    target = gen_reg_rtx (mode);

  /* Emit instruction */
  pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
  gcc_assert (pat);
  emit_insn (pat);

  return target;
}

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
                        rtx outof_target, rtx into_target,
                        int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
                             into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
         of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
        emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
        if (!force_expand_binop (word_mode, binoptab,
                                 outof_input, GEN_INT (BITS_PER_WORD - 1),
                                 outof_target, unsignedp, methods))
          return false;
    }
  return true;
}

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
                      rtx outof_input, rtx into_input, rtx op1,
                      rtx outof_target, rtx into_target,
                      int unsignedp, enum optab_methods methods,
                      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
         the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
         has unknown behavior.  Do a single shift first, then shift by the
         remainder.  It's OK to use ~OP1 as the remainder if shift counts
         are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
                              outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
        {
          tmp = immed_double_const (-1, -1, op1_mode);
          tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
                                       0, true, methods);
        }
      else
        {
          tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
          tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
                                       0, true, methods);
        }
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
                          carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
                      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
                           into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                             outof_target, unsignedp, methods))
      return false;

  return true;
}

#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
                                  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
                                  rtx outof_input, rtx into_input,
                                  rtx subword_op1, rtx superword_op1,
                                  rtx outof_target, rtx into_target,
                                  int unsignedp, enum optab_methods methods,
                                  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
         OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, 0, unsignedp, methods))
        return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                                   outof_superword, into_superword,
                                   unsignedp, methods))
        return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, subword_op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
                              into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
                                outof_target, outof_superword,
                                word_mode, false))
      return false;

  return true;
}
#endif

/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */
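/* As a concrete illustration, taking BITS_PER_WORD == 32 purely for the
   sake of example, a left shift of a doubleword value by a variable count
   OP1 is synthesized from the two cases handled above:

     OP1 < 32 (expand_subword_shift):
       INTO_TARGET  = (INTO_INPUT << OP1) | (OUTOF_INPUT >> (32 - OP1))
       OUTOF_TARGET = OUTOF_INPUT << OP1

     OP1 >= 32 (expand_superword_shift):
       INTO_TARGET  = OUTOF_INPUT << (OP1 - 32)
       OUTOF_TARGET = 0

   For an arithmetic right shift the superword OUTOF_TARGET is filled with
   copies of the sign bit instead of zeros.  */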
static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
                         rtx outof_input, rtx into_input, rtx op1,
                         rtx outof_target, rtx into_target,
                         int unsignedp, enum optab_methods methods,
                         unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
                                    outof_input, into_input, op1,
                                    0, into_target,
                                    unsignedp, methods, shift_mask))
        return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
                               outof_target, unsignedp, methods))
        return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
         is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
                                    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
    {
      if (tmp == const0_rtx)
        return expand_superword_shift (binoptab, outof_input, superword_op1,
                                       outof_target, into_target,
                                       unsignedp, methods);
      else
        return expand_subword_shift (op1_mode, binoptab,
                                     outof_input, into_input, op1,
                                     outof_target, into_target,
                                     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
                                          cmp_code, cmp1, cmp2,
                                          outof_input, into_input,
                                          op1, superword_op1,
                                          outof_target, into_target,
                                          unsignedp, methods, shift_mask))
      return true;
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
                           0, 0, subword_label);
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
                               outof_target, into_target,
                               unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
                             outof_input, into_input, op1,
                             outof_target, into_target,
                             unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}

/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.  Note that we do not make a REG_NO_CONFLICT block here
   because we are not operating on one word at a time.

   The multiplication proceeds as follows:

                                   _______________________
                                  [__op0_high_|__op0_low__]
                                   _______________________
             *                    [__op1_high_|__op1_low__]
             _______________________________________________

                                   _______________________
    (1)                           [__op0_low__*__op1_low__]
                       _______________________
    (2a)              [__op0_low__*__op1_high_]
                       _______________________
    (2b)              [__op0_high_*__op1_low__]
           _______________________
    (3)   [__op0_high_*__op1_high_]


  This gives a 4-word result.  Since we are only interested in the
  lower 2 words, partial result (3) and the upper words of (2a) and
  (2b) don't need to be calculated.  Hence (2a) and (2b) can be
  calculated using non-widening multiplication.

  (1), however, needs to be calculated with an unsigned widening
  multiplication.  If this operation is not directly supported we
  try using a signed widening multiplication and adjust the result.
  This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */
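/* A small worked example of the adjustment above, using an 8-bit word
   purely for illustration: let op0_low = 0xff and op1_low = 0x02.  The
   desired unsigned widening product is 255 * 2 = 0x01fe.  A signed widening
   multiply instead computes (-1) * 2 = -2 = 0xfffe.  Adding
   op1_low * 2**BITS_PER_WORD = 0x0200 gives 0x101fe, which truncates to
   0x01fe in the double-word result, as required.  Logically shifting
   op0_low right by BITS_PER_WORD - 1 yields exactly the 0-or-1 that is
   added to op0_high before partial product (2b) is formed.  */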
static rtx
expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         adjust, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         REG_P (product_high) ? product_high : adjust,
                         0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}

/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}

/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}

/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */
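/* For orientation, the fallback strategy used below is: first try a direct
   insn for MODE; then, for constant rotates, the opposite rotation; then a
   widening multiply; then open-coding in a wider mode; then word-at-a-time
   synthesis (bitwise operations, double-word shifts and rotates, add/sub
   with carry propagation, and double-word multiplication); and finally a
   library call if METHODS allows one.  */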
rtx
expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  int commutative_op = 0;
  int shift_op = (binoptab->code == ASHIFT
                  || binoptab->code == ASHIFTRT
                  || binoptab->code == LSHIFTRT
                  || binoptab->code == ROTATE
                  || binoptab->code == ROTATERT);
  rtx entry_last = get_last_insn ();
  rtx last;
  bool first_pass_p = true;

  class = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_MODE (op0) != VOIDmode)
        op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
      op0 = force_reg (mode, op0);
    }

  if (CONSTANT_P (op1) && optimize
      && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_MODE (op1) != VOIDmode)
        op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
      op1 = force_reg (mode, op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
      || binoptab == smul_widen_optab
      || binoptab == umul_widen_optab
      || binoptab == smul_highpart_optab
      || binoptab == umul_highpart_optab)
    {
      commutative_op = 1;

      if (swap_commutative_operands_with_target (target, op0, op1))
        {
          temp = op1;
          op1 = op0;
          op0 = temp;
        }
    }

 retry:

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      if (target)
        temp = target;
      else
        temp = gen_reg_rtx (mode);

      /* If it is a commutative operator and the modes would match
         if we would swap the operands, we can save the conversions.  */
      if (commutative_op)
        {
          if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
              && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
            {
              rtx tmp;

              tmp = op0; op0 = op1; op1 = tmp;
              tmp = xop0; xop0 = xop1; xop1 = tmp;
            }
        }

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode
                              ? GET_MODE (op0)
                              : mode,
                              xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode
                              ? GET_MODE (op1)
                              : mode,
                              xop1, unsignedp);

      /* Now, if insn's predicates don't allow our operands, put them into
         pseudo regs.  */

      if (!insn_data[icode].operand[1].predicate (xop0, mode0)
          && mode0 != VOIDmode)
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[2].predicate (xop1, mode1)
          && mode1 != VOIDmode)
        xop1 = copy_to_mode_reg (mode1, xop1);

      if (!insn_data[icode].operand[0].predicate (temp, mode))
        temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0, xop1);
      if (pat)
        {
          /* If PAT is composed of more than one insn, try to add an appropriate
             REG_EQUAL note to it.  If we can't because TEMP conflicts with an
             operand, call ourselves again, this time without a target.  */
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
            {
              delete_insns_since (last);
              return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                                   unsignedp, methods);
            }

          emit_insn (pat);
          return temp;
        }
      else
        delete_insns_since (last);
    }

  /* If we were trying to rotate by a constant value, and that didn't
     work, try rotating the other direction before falling back to
     shifts and bitwise-or.  */
  if (first_pass_p
      && (binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && INTVAL (op1) > 0
      && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
    {
      first_pass_p = false;
      op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
      binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
      goto retry;
    }

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_WIDER_MODE (mode) != VOIDmode
      && (((unsignedp ? umul_widen_optab : smul_widen_optab)
           ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
          != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_WIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT
              && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                        GET_MODE_BITSIZE (GET_MODE (temp))))
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (class)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
         wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && (((unsignedp ? umul_widen_optab : smul_widen_optab)
                     ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && class == MODE_INT)
              no_extend = 1;

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (class != MODE_INT
                    || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                               GET_MODE_BITSIZE (wider_mode)))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;
      rtx equiv_value;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
        {
          if (binoptab->code != UNKNOWN)
            equiv_value
              = gen_rtx_fmt_ee (binoptab->code, mode,
                                copy_rtx (op0), copy_rtx (op1));
          else
            equiv_value = 0;

          emit_no_conflict_block (insns, target, op0, op1, equiv_value);
          return target;
        }
    }

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && class == MODE_INT
      && (GET_CODE (op1) == CONST_INT || !optimize_size)
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      enum machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx insns, equiv_value;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0 || target == op0 || target == op1)
            target = gen_reg_rtx (mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, mode);
          into_target = operand_subword (target, 1 - outof_word, 1, mode);

          outof_input = operand_subword_force (op0, outof_word, mode);
          into_input = operand_subword_force (op0, 1 - outof_word, mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, next_methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
              emit_no_conflict_block (insns, target, op0, op1, equiv_value);
              return target;
            }
          end_sequence ();
        }
    }

  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      rtx insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is not
         a REG, first because having a register instead may open optimization
         opportunities, and second because if target and op0 happen to be MEMs
         designating the same location, we would risk clobbering it too early
         in the code sequence we generate below.  */
      if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
             block to help the register allocator a bit.  But a multi-word
             rotate will need all the input bits when setting the output
             bits, so there clearly is a conflict between the input and
             output registers.  So we can't use a no-conflict block here.  */
          emit_insn (insns);
          return target;
        }
    }

1694 /* These can be done a word at a time by propagating carries. */
1695 if ((binoptab == add_optab || binoptab == sub_optab)
1696 && class == MODE_INT
1697 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1698 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1700 unsigned int i;
1701 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1702 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1703 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1704 rtx xop0, xop1, xtarget;
1706 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1707 value is one of those, use it. Otherwise, use 1 since it is the
1708 one easiest to get. */
1709 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1710 int normalizep = STORE_FLAG_VALUE;
1711 #else
1712 int normalizep = 1;
1713 #endif
1715 /* Prepare the operands. */
1716 xop0 = force_reg (mode, op0);
1717 xop1 = force_reg (mode, op1);
1719 xtarget = gen_reg_rtx (mode);
1721 if (target == 0 || !REG_P (target))
1722 target = xtarget;
1724 /* Indicate for flow that the entire target reg is being set. */
1725 if (REG_P (target))
1726 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1728 /* Do the actual arithmetic. */
1729 for (i = 0; i < nwords; i++)
1731 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1732 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1733 rtx op0_piece = operand_subword_force (xop0, index, mode);
1734 rtx op1_piece = operand_subword_force (xop1, index, mode);
1735 rtx x;
1737 /* Main add/subtract of the input operands. */
1738 x = expand_binop (word_mode, binoptab,
1739 op0_piece, op1_piece,
1740 target_piece, unsignedp, next_methods);
1741 if (x == 0)
1742 break;
1744 if (i + 1 < nwords)
1746 /* Store carry from main add/subtract. */
1747 carry_out = gen_reg_rtx (word_mode);
1748 carry_out = emit_store_flag_force (carry_out,
1749 (binoptab == add_optab
1750 ? LT : GT),
1751 x, op0_piece,
1752 word_mode, 1, normalizep);
1755 if (i > 0)
1757 rtx newx;
1759 /* Add/subtract previous carry to main result. */
1760 newx = expand_binop (word_mode,
1761 normalizep == 1 ? binoptab : otheroptab,
1762 x, carry_in,
1763 NULL_RTX, 1, next_methods);
1765 if (i + 1 < nwords)
1767 /* Get out carry from adding/subtracting carry in. */
1768 rtx carry_tmp = gen_reg_rtx (word_mode);
1769 carry_tmp = emit_store_flag_force (carry_tmp,
1770 (binoptab == add_optab
1771 ? LT : GT),
1772 newx, x,
1773 word_mode, 1, normalizep);
1775 /* Logical-ior the two possible carries together. */
1776 carry_out = expand_binop (word_mode, ior_optab,
1777 carry_out, carry_tmp,
1778 carry_out, 0, next_methods);
1779 if (carry_out == 0)
1780 break;
1782 emit_move_insn (target_piece, newx);
1784 else
1786 if (x != target_piece)
1787 emit_move_insn (target_piece, x);
1790 carry_in = carry_out;
1793 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1795 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1796 || ! rtx_equal_p (target, xtarget))
1798 rtx temp = emit_move_insn (target, xtarget);
1800 set_unique_reg_note (temp,
1801 REG_EQUAL,
1802 gen_rtx_fmt_ee (binoptab->code, mode,
1803 copy_rtx (xop0),
1804 copy_rtx (xop1)));
1806 else
1807 target = xtarget;
1809 return target;
1812 else
1813 delete_insns_since (last);
1816 /* Attempt to synthesize double word multiplies using a sequence of word
1817 mode multiplications. We first attempt to generate a sequence using a
1818 more efficient unsigned widening multiply, and if that fails we then
1819 try using a signed widening multiply. */
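/* Roughly, with 32-bit words, the decomposition in question looks like the
   following C sketch.  mul_double and the local names are hypothetical; the
   (uint64_t) cast on the low halves plays the role of the word-to-doubleword
   widening multiply, the rest are ordinary word_mode multiplies and adds.

       #include <stdint.h>

       uint64_t mul_double (uint64_t a, uint64_t b)
       {
         uint32_t a_lo = (uint32_t) a, a_hi = (uint32_t) (a >> 32);
         uint32_t b_lo = (uint32_t) b, b_hi = (uint32_t) (b >> 32);
         uint64_t low   = (uint64_t) a_lo * b_lo;      widening multiply
         uint32_t cross = a_lo * b_hi + a_hi * b_lo;   low halves of the cross terms
         return low + ((uint64_t) cross << 32);
       }

   The a_hi * b_hi term is dropped because it only affects bits above the
   double-word result.  */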
1821 if (binoptab == smul_optab
1822 && class == MODE_INT
1823 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1824 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1825 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1827 rtx product = NULL_RTX;
1829 if (umul_widen_optab->handlers[(int) mode].insn_code
1830 != CODE_FOR_nothing)
1832 product = expand_doubleword_mult (mode, op0, op1, target,
1833 true, methods);
1834 if (!product)
1835 delete_insns_since (last);
1838 if (product == NULL_RTX
1839 && smul_widen_optab->handlers[(int) mode].insn_code
1840 != CODE_FOR_nothing)
1842 product = expand_doubleword_mult (mode, op0, op1, target,
1843 false, methods);
1844 if (!product)
1845 delete_insns_since (last);
1848 if (product != NULL_RTX)
1850 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1852 temp = emit_move_insn (target ? target : product, product);
1853 set_unique_reg_note (temp,
1854 REG_EQUAL,
1855 gen_rtx_fmt_ee (MULT, mode,
1856 copy_rtx (op0),
1857 copy_rtx (op1)));
1859 return product;
1863 /* It can't be open-coded in this mode.
1864 Use a library call if one is available and caller says that's ok. */
1866 if (binoptab->handlers[(int) mode].libfunc
1867 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1869 rtx insns;
1870 rtx op1x = op1;
1871 enum machine_mode op1_mode = mode;
1872 rtx value;
1874 start_sequence ();
1876 if (shift_op)
1878 op1_mode = word_mode;
1879 /* Specify unsigned here,
1880 since negative shift counts are meaningless. */
1881 op1x = convert_to_mode (word_mode, op1, 1);
1884 if (GET_MODE (op0) != VOIDmode
1885 && GET_MODE (op0) != mode)
1886 op0 = convert_to_mode (mode, op0, unsignedp);
1888 /* Pass 1 for NO_QUEUE so we don't lose any increments
1889 if the libcall is cse'd or moved. */
1890 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1891 NULL_RTX, LCT_CONST, mode, 2,
1892 op0, mode, op1x, op1_mode);
1894 insns = get_insns ();
1895 end_sequence ();
1897 target = gen_reg_rtx (mode);
1898 emit_libcall_block (insns, target, value,
1899 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1901 return target;
1904 delete_insns_since (last);
1906 /* It can't be done in this mode. Can we do it in a wider mode? */
1908 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1909 || methods == OPTAB_MUST_WIDEN))
1912 /* Caller says don't even try. */
1912 delete_insns_since (entry_last);
1913 return 0;
1916 /* Compute the value of METHODS to pass to recursive calls.
1917 Don't allow widening to be tried recursively. */
1919 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1921 /* Look for a wider mode of the same class for which it appears we can do
1922 the operation. */
1924 if (CLASS_HAS_WIDER_MODES_P (class))
1926 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1927 wider_mode != VOIDmode;
1928 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1930 if ((binoptab->handlers[(int) wider_mode].insn_code
1931 != CODE_FOR_nothing)
1932 || (methods == OPTAB_LIB
1933 && binoptab->handlers[(int) wider_mode].libfunc))
1935 rtx xop0 = op0, xop1 = op1;
1936 int no_extend = 0;
1938 /* For certain integer operations, we need not actually extend
1939 the narrow operands, as long as we will truncate
1940 the results to the same narrowness. */
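/* Sketch of why this is safe: for the operations listed just below, the low
   bits of the result never depend on the widened upper bits of the inputs,
   so those upper bits may hold garbage as long as the result is truncated
   back.  E.g., for hypothetical 8-bit values carried in 32-bit registers:

       unsigned char sum = (unsigned char) (a + b);    a and b are 32-bit

   only bits 0-7 of a and b influence sum.  The same holds for AND, IOR, XOR,
   SUB, MUL and left shifts, but not for right shifts or division.  */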
1942 if ((binoptab == ior_optab || binoptab == and_optab
1943 || binoptab == xor_optab
1944 || binoptab == add_optab || binoptab == sub_optab
1945 || binoptab == smul_optab || binoptab == ashl_optab)
1946 && class == MODE_INT)
1947 no_extend = 1;
1949 xop0 = widen_operand (xop0, wider_mode, mode,
1950 unsignedp, no_extend);
1952 /* The second operand of a shift must always be extended. */
1953 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1954 no_extend && binoptab != ashl_optab);
1956 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1957 unsignedp, methods);
1958 if (temp)
1960 if (class != MODE_INT
1961 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1962 GET_MODE_BITSIZE (wider_mode)))
1964 if (target == 0)
1965 target = gen_reg_rtx (mode);
1966 convert_move (target, temp, 0);
1967 return target;
1969 else
1970 return gen_lowpart (mode, temp);
1972 else
1973 delete_insns_since (last);
1978 delete_insns_since (entry_last);
1979 return 0;
1982 /* Expand a binary operator which has both signed and unsigned forms.
1983 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1984 signed operations.
1986 If we widen unsigned operands, we may use a signed wider operation instead
1987 of an unsigned wider operation, since the result would be the same. */
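/* A minimal sketch of that equivalence: once unsigned operands have been
   zero-extended into a wider mode they are nonnegative, so a signed
   operation in the wider mode gives the same answer as the unsigned one.
   For example, with hypothetical 16-bit operands widened to 32 bits:

       #include <stdint.h>

       uint16_t udiv16_via_sdiv32 (uint16_t a, uint16_t b)
       {
         return (uint16_t) ((int32_t) a / (int32_t) b);
       }

   Both casts to int32_t behave as zero-extensions here, and signed division
   of two nonnegative values agrees with unsigned division.  */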
1990 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
1991 rtx op0, rtx op1, rtx target, int unsignedp,
1992 enum optab_methods methods)
1994 rtx temp;
1995 optab direct_optab = unsignedp ? uoptab : soptab;
1996 struct optab wide_soptab;
1998 /* Do it without widening, if possible. */
1999 temp = expand_binop (mode, direct_optab, op0, op1, target,
2000 unsignedp, OPTAB_DIRECT);
2001 if (temp || methods == OPTAB_DIRECT)
2002 return temp;
2004 /* Try widening to a signed int. Make a fake signed optab that
2005 hides any signed insn for direct use. */
2006 wide_soptab = *soptab;
2007 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2008 wide_soptab.handlers[(int) mode].libfunc = 0;
2010 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2011 unsignedp, OPTAB_WIDEN);
2013 /* For unsigned operands, try widening to an unsigned int. */
2014 if (temp == 0 && unsignedp)
2015 temp = expand_binop (mode, uoptab, op0, op1, target,
2016 unsignedp, OPTAB_WIDEN);
2017 if (temp || methods == OPTAB_WIDEN)
2018 return temp;
2020 /* Use the right width lib call if that exists. */
2021 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2022 if (temp || methods == OPTAB_LIB)
2023 return temp;
2025 /* Must widen and use a lib call, use either signed or unsigned. */
2026 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2027 unsignedp, methods);
2028 if (temp != 0)
2029 return temp;
2030 if (unsignedp)
2031 return expand_binop (mode, uoptab, op0, op1, target,
2032 unsignedp, methods);
2033 return 0;
2036 /* Generate code to perform an operation specified by UNOPPTAB
2037 on operand OP0, with two results to TARG0 and TARG1.
2038 We assume that the order of the operands for the instruction
2039 is TARG0, TARG1, OP0.
2041 Either TARG0 or TARG1 may be zero, but what that means is that
2042 the result is not actually wanted. We will generate it into
2043 a dummy pseudo-reg and discard it. They may not both be zero.
2045 Returns 1 if this operation can be performed; 0 if not. */
2048 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2049 int unsignedp)
2051 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2052 enum mode_class class;
2053 enum machine_mode wider_mode;
2054 rtx entry_last = get_last_insn ();
2055 rtx last;
2057 class = GET_MODE_CLASS (mode);
2059 if (!targ0)
2060 targ0 = gen_reg_rtx (mode);
2061 if (!targ1)
2062 targ1 = gen_reg_rtx (mode);
2064 /* Record where to go back to if we fail. */
2065 last = get_last_insn ();
2067 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2069 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2070 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2071 rtx pat;
2072 rtx xop0 = op0;
2074 if (GET_MODE (xop0) != VOIDmode
2075 && GET_MODE (xop0) != mode0)
2076 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2078 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2079 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2080 xop0 = copy_to_mode_reg (mode0, xop0);
2082 /* We could handle this, but we should always be called with a pseudo
2083 for our targets and all insns should take them as outputs. */
2084 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2085 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2087 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2088 if (pat)
2090 emit_insn (pat);
2091 return 1;
2093 else
2094 delete_insns_since (last);
2097 /* It can't be done in this mode. Can we do it in a wider mode? */
2099 if (CLASS_HAS_WIDER_MODES_P (class))
2101 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2102 wider_mode != VOIDmode;
2103 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2105 if (unoptab->handlers[(int) wider_mode].insn_code
2106 != CODE_FOR_nothing)
2108 rtx t0 = gen_reg_rtx (wider_mode);
2109 rtx t1 = gen_reg_rtx (wider_mode);
2110 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2112 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2114 convert_move (targ0, t0, unsignedp);
2115 convert_move (targ1, t1, unsignedp);
2116 return 1;
2118 else
2119 delete_insns_since (last);
2124 delete_insns_since (entry_last);
2125 return 0;
2128 /* Generate code to perform an operation specified by BINOPTAB
2129 on operands OP0 and OP1, with two results to TARG0 and TARG1.
2130 We assume that the order of the operands for the instruction
2131 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2132 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2134 Either TARG0 or TARG1 may be zero, but what that means is that
2135 the result is not actually wanted. We will generate it into
2136 a dummy pseudo-reg and discard it. They may not both be zero.
2138 Returns 1 if this operation can be performed; 0 if not. */
2141 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2142 int unsignedp)
2144 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2145 enum mode_class class;
2146 enum machine_mode wider_mode;
2147 rtx entry_last = get_last_insn ();
2148 rtx last;
2150 class = GET_MODE_CLASS (mode);
2152 /* If we are inside an appropriately-short loop and we are optimizing,
2153 force expensive constants into a register. */
2154 if (CONSTANT_P (op0) && optimize
2155 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2156 op0 = force_reg (mode, op0);
2158 if (CONSTANT_P (op1) && optimize
2159 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2160 op1 = force_reg (mode, op1);
2162 if (!targ0)
2163 targ0 = gen_reg_rtx (mode);
2164 if (!targ1)
2165 targ1 = gen_reg_rtx (mode);
2167 /* Record where to go back to if we fail. */
2168 last = get_last_insn ();
2170 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2172 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2173 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2174 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2175 rtx pat;
2176 rtx xop0 = op0, xop1 = op1;
2178 /* In case the insn wants input operands in modes different from
2179 those of the actual operands, convert the operands. It would
2180 seem that we don't need to convert CONST_INTs, but we do, so
2181 that they're properly zero-extended, sign-extended or truncated
2182 for their mode. */
2184 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2185 xop0 = convert_modes (mode0,
2186 GET_MODE (op0) != VOIDmode
2187 ? GET_MODE (op0)
2188 : mode,
2189 xop0, unsignedp);
2191 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2192 xop1 = convert_modes (mode1,
2193 GET_MODE (op1) != VOIDmode
2194 ? GET_MODE (op1)
2195 : mode,
2196 xop1, unsignedp);
2198 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2199 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2200 xop0 = copy_to_mode_reg (mode0, xop0);
2202 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2203 xop1 = copy_to_mode_reg (mode1, xop1);
2205 /* We could handle this, but we should always be called with a pseudo
2206 for our targets and all insns should take them as outputs. */
2207 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2208 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2210 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2211 if (pat)
2213 emit_insn (pat);
2214 return 1;
2216 else
2217 delete_insns_since (last);
2220 /* It can't be done in this mode. Can we do it in a wider mode? */
2222 if (CLASS_HAS_WIDER_MODES_P (class))
2224 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2225 wider_mode != VOIDmode;
2226 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2228 if (binoptab->handlers[(int) wider_mode].insn_code
2229 != CODE_FOR_nothing)
2231 rtx t0 = gen_reg_rtx (wider_mode);
2232 rtx t1 = gen_reg_rtx (wider_mode);
2233 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2234 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2236 if (expand_twoval_binop (binoptab, cop0, cop1,
2237 t0, t1, unsignedp))
2239 convert_move (targ0, t0, unsignedp);
2240 convert_move (targ1, t1, unsignedp);
2241 return 1;
2243 else
2244 delete_insns_since (last);
2249 delete_insns_since (entry_last);
2250 return 0;
2253 /* Expand the two-valued library call indicated by BINOPTAB, but
2254 preserve only one of the values. If TARG0 is non-NULL, the first
2255 value is placed into TARG0; otherwise the second value is placed
2256 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2257 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2258 This routine assumes that the value returned by the library call is
2259 as if the return value was of an integral mode twice as wide as the
2260 mode of OP0. Returns 1 if the call was successful. */
2262 bool
2263 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2264 rtx targ0, rtx targ1, enum rtx_code code)
2266 enum machine_mode mode;
2267 enum machine_mode libval_mode;
2268 rtx libval;
2269 rtx insns;
2271 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2272 gcc_assert (!targ0 != !targ1);
2274 mode = GET_MODE (op0);
2275 if (!binoptab->handlers[(int) mode].libfunc)
2276 return false;
2278 /* The value returned by the library function will have twice as
2279 many bits as the nominal MODE. */
2280 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2281 MODE_INT);
2282 start_sequence ();
2283 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2284 NULL_RTX, LCT_CONST,
2285 libval_mode, 2,
2286 op0, mode,
2287 op1, mode);
2288 /* Get the part of VAL containing the value that we want. */
2289 libval = simplify_gen_subreg (mode, libval, libval_mode,
2290 targ0 ? 0 : GET_MODE_SIZE (mode));
2291 insns = get_insns ();
2292 end_sequence ();
2293 /* Move the result into the desired location. */
2294 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2295 gen_rtx_fmt_ee (code, mode, op0, op1));
2297 return true;
2301 /* Wrapper around expand_unop which takes an rtx code to specify
2302 the operation to perform, not an optab pointer. All other
2303 arguments are the same. */
2305 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2306 rtx target, int unsignedp)
2308 optab unop = code_to_optab[(int) code];
2309 gcc_assert (unop);
2311 return expand_unop (mode, unop, op0, target, unsignedp);
2314 /* Try calculating
2315 (clz:narrow x)
2316 as
2317 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
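/* Concretely, counting leading zeros of a 16-bit value with a 32-bit clz,
   assuming unsigned int is 32 bits wide; clz16 is a hypothetical name and,
   like the RTL clz, the argument must be nonzero:

       static int clz16 (unsigned short x)
       {
         return __builtin_clz ((unsigned int) x) - (32 - 16);
       }

   Zero-extension guarantees exactly the extra (32 - 16) leading zero bits
   that the subtraction removes.  */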
2318 static rtx
2319 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2321 enum mode_class class = GET_MODE_CLASS (mode);
2322 if (CLASS_HAS_WIDER_MODES_P (class))
2324 enum machine_mode wider_mode;
2325 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2326 wider_mode != VOIDmode;
2327 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2329 if (clz_optab->handlers[(int) wider_mode].insn_code
2330 != CODE_FOR_nothing)
2332 rtx xop0, temp, last;
2334 last = get_last_insn ();
2336 if (target == 0)
2337 target = gen_reg_rtx (mode);
2338 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2339 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2340 if (temp != 0)
2341 temp = expand_binop (wider_mode, sub_optab, temp,
2342 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2343 - GET_MODE_BITSIZE (mode)),
2344 target, true, OPTAB_DIRECT);
2345 if (temp == 0)
2346 delete_insns_since (last);
2348 return temp;
2352 return 0;
2355 /* Try calculating (parity x) as (and (popcount x) 1), where
2356 popcount can also be done in a wider mode. */
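/* For a hypothetical 32-bit x this is simply

       parity = __builtin_popcount (x) & 1;

   and because zero-extending x to a wider mode adds only zero bits, the
   popcount (and hence the parity) may just as well be computed there.  */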
2357 static rtx
2358 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2360 enum mode_class class = GET_MODE_CLASS (mode);
2361 if (CLASS_HAS_WIDER_MODES_P (class))
2363 enum machine_mode wider_mode;
2364 for (wider_mode = mode; wider_mode != VOIDmode;
2365 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2367 if (popcount_optab->handlers[(int) wider_mode].insn_code
2368 != CODE_FOR_nothing)
2370 rtx xop0, temp, last;
2372 last = get_last_insn ();
2374 if (target == 0)
2375 target = gen_reg_rtx (mode);
2376 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2377 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2378 true);
2379 if (temp != 0)
2380 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2381 target, true, OPTAB_DIRECT);
2382 if (temp == 0)
2383 delete_insns_since (last);
2385 return temp;
2389 return 0;
2392 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2393 conditions, VAL may already be a SUBREG against which we cannot generate
2394 a further SUBREG. In this case, we expect forcing the value into a
2395 register will work around the situation. */
2397 static rtx
2398 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2399 enum machine_mode imode)
2401 rtx ret;
2402 ret = lowpart_subreg (omode, val, imode);
2403 if (ret == NULL)
2405 val = force_reg (imode, val);
2406 ret = lowpart_subreg (omode, val, imode);
2407 gcc_assert (ret != NULL);
2409 return ret;
2412 /* Expand a floating point absolute value or negation operation via a
2413 logical operation on the sign bit. */
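/* A rough sketch, assuming IEEE single precision with the sign in bit 31;
   fabs_bits is a hypothetical name:

       #include <stdint.h>
       #include <string.h>

       float fabs_bits (float x)
       {
         uint32_t bits;
         memcpy (&bits, &x, sizeof bits);   view the float as an integer
         bits &= 0x7fffffffu;               ABS: AND the sign bit away
         memcpy (&x, &bits, sizeof bits);
         return x;
       }

   Negation instead XORs the mask 0x80000000u, flipping the sign bit.  The
   code below does the same thing one word at a time when the mode is wider
   than a word.  */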
2415 static rtx
2416 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2417 rtx op0, rtx target)
2419 const struct real_format *fmt;
2420 int bitpos, word, nwords, i;
2421 enum machine_mode imode;
2422 HOST_WIDE_INT hi, lo;
2423 rtx temp, insns;
2425 /* The format has to have a simple sign bit. */
2426 fmt = REAL_MODE_FORMAT (mode);
2427 if (fmt == NULL)
2428 return NULL_RTX;
2430 bitpos = fmt->signbit_rw;
2431 if (bitpos < 0)
2432 return NULL_RTX;
2434 /* Don't create negative zeros if the format doesn't support them. */
2435 if (code == NEG && !fmt->has_signed_zero)
2436 return NULL_RTX;
2438 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2440 imode = int_mode_for_mode (mode);
2441 if (imode == BLKmode)
2442 return NULL_RTX;
2443 word = 0;
2444 nwords = 1;
2446 else
2448 imode = word_mode;
2450 if (FLOAT_WORDS_BIG_ENDIAN)
2451 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2452 else
2453 word = bitpos / BITS_PER_WORD;
2454 bitpos = bitpos % BITS_PER_WORD;
2455 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2458 if (bitpos < HOST_BITS_PER_WIDE_INT)
2460 hi = 0;
2461 lo = (HOST_WIDE_INT) 1 << bitpos;
2463 else
2465 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2466 lo = 0;
2468 if (code == ABS)
2469 lo = ~lo, hi = ~hi;
2471 if (target == 0 || target == op0)
2472 target = gen_reg_rtx (mode);
2474 if (nwords > 1)
2476 start_sequence ();
2478 for (i = 0; i < nwords; ++i)
2480 rtx targ_piece = operand_subword (target, i, 1, mode);
2481 rtx op0_piece = operand_subword_force (op0, i, mode);
2483 if (i == word)
2485 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2486 op0_piece,
2487 immed_double_const (lo, hi, imode),
2488 targ_piece, 1, OPTAB_LIB_WIDEN);
2489 if (temp != targ_piece)
2490 emit_move_insn (targ_piece, temp);
2492 else
2493 emit_move_insn (targ_piece, op0_piece);
2496 insns = get_insns ();
2497 end_sequence ();
2499 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2500 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2502 else
2504 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2505 gen_lowpart (imode, op0),
2506 immed_double_const (lo, hi, imode),
2507 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2508 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2510 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2511 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2514 return target;
2517 /* Generate code to perform an operation specified by UNOPTAB
2518 on operand OP0, with result having machine-mode MODE.
2520 UNSIGNEDP is for the case where we have to widen the operands
2521 to perform the operation. It says to use zero-extension.
2523 If TARGET is nonzero, the value
2524 is generated there, if it is convenient to do so.
2525 In all cases an rtx is returned for the locus of the value;
2526 this may or may not be TARGET. */
2529 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2530 int unsignedp)
2532 enum mode_class class;
2533 enum machine_mode wider_mode;
2534 rtx temp;
2535 rtx last = get_last_insn ();
2536 rtx pat;
2538 class = GET_MODE_CLASS (mode);
2540 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2542 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2543 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2544 rtx xop0 = op0;
2546 if (target)
2547 temp = target;
2548 else
2549 temp = gen_reg_rtx (mode);
2551 if (GET_MODE (xop0) != VOIDmode
2552 && GET_MODE (xop0) != mode0)
2553 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2555 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2557 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2558 xop0 = copy_to_mode_reg (mode0, xop0);
2560 if (!insn_data[icode].operand[0].predicate (temp, mode))
2561 temp = gen_reg_rtx (mode);
2563 pat = GEN_FCN (icode) (temp, xop0);
2564 if (pat)
2566 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2567 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2569 delete_insns_since (last);
2570 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2573 emit_insn (pat);
2575 return temp;
2577 else
2578 delete_insns_since (last);
2581 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2583 /* Widening clz needs special treatment. */
2584 if (unoptab == clz_optab)
2586 temp = widen_clz (mode, op0, target);
2587 if (temp)
2588 return temp;
2589 else
2590 goto try_libcall;
2593 if (CLASS_HAS_WIDER_MODES_P (class))
2594 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2595 wider_mode != VOIDmode;
2596 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2598 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2600 rtx xop0 = op0;
2602 /* For certain operations, we need not actually extend
2603 the narrow operand, as long as we will truncate the
2604 results to the same narrowness. */
2606 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2607 (unoptab == neg_optab
2608 || unoptab == one_cmpl_optab)
2609 && class == MODE_INT);
2611 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2612 unsignedp);
2614 if (temp)
2616 if (class != MODE_INT
2617 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2618 GET_MODE_BITSIZE (wider_mode)))
2620 if (target == 0)
2621 target = gen_reg_rtx (mode);
2622 convert_move (target, temp, 0);
2623 return target;
2625 else
2626 return gen_lowpart (mode, temp);
2628 else
2629 delete_insns_since (last);
2633 /* These can be done a word at a time. */
2634 if (unoptab == one_cmpl_optab
2635 && class == MODE_INT
2636 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2637 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2639 int i;
2640 rtx insns;
2642 if (target == 0 || target == op0)
2643 target = gen_reg_rtx (mode);
2645 start_sequence ();
2647 /* Do the actual arithmetic. */
2648 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2650 rtx target_piece = operand_subword (target, i, 1, mode);
2651 rtx x = expand_unop (word_mode, unoptab,
2652 operand_subword_force (op0, i, mode),
2653 target_piece, unsignedp);
2655 if (target_piece != x)
2656 emit_move_insn (target_piece, x);
2659 insns = get_insns ();
2660 end_sequence ();
2662 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2663 gen_rtx_fmt_e (unoptab->code, mode,
2664 copy_rtx (op0)));
2665 return target;
2668 if (unoptab->code == NEG)
2670 /* Try negating floating point values by flipping the sign bit. */
2671 if (SCALAR_FLOAT_MODE_P (mode))
2673 temp = expand_absneg_bit (NEG, mode, op0, target);
2674 if (temp)
2675 return temp;
2678 /* If there is no negation pattern, and we have no negative zero,
2679 try subtracting from zero. */
2680 if (!HONOR_SIGNED_ZEROS (mode))
2682 temp = expand_binop (mode, (unoptab == negv_optab
2683 ? subv_optab : sub_optab),
2684 CONST0_RTX (mode), op0, target,
2685 unsignedp, OPTAB_DIRECT);
2686 if (temp)
2687 return temp;
2691 /* Try calculating parity (x) as popcount (x) % 2. */
2692 if (unoptab == parity_optab)
2694 temp = expand_parity (mode, op0, target);
2695 if (temp)
2696 return temp;
2699 try_libcall:
2700 /* Now try a library call in this mode. */
2701 if (unoptab->handlers[(int) mode].libfunc)
2703 rtx insns;
2704 rtx value;
2705 enum machine_mode outmode = mode;
2707 /* All of these functions return small values. Thus we choose to
2708 have them return something that isn't a double-word. */
2709 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2710 || unoptab == popcount_optab || unoptab == parity_optab)
2711 outmode
2712 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2714 start_sequence ();
2716 /* Pass 1 for NO_QUEUE so we don't lose any increments
2717 if the libcall is cse'd or moved. */
2718 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2719 NULL_RTX, LCT_CONST, outmode,
2720 1, op0, mode);
2721 insns = get_insns ();
2722 end_sequence ();
2724 target = gen_reg_rtx (outmode);
2725 emit_libcall_block (insns, target, value,
2726 gen_rtx_fmt_e (unoptab->code, outmode, op0));
2728 return target;
2731 /* It can't be done in this mode. Can we do it in a wider mode? */
2733 if (CLASS_HAS_WIDER_MODES_P (class))
2735 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2736 wider_mode != VOIDmode;
2737 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2739 if ((unoptab->handlers[(int) wider_mode].insn_code
2740 != CODE_FOR_nothing)
2741 || unoptab->handlers[(int) wider_mode].libfunc)
2743 rtx xop0 = op0;
2745 /* For certain operations, we need not actually extend
2746 the narrow operand, as long as we will truncate the
2747 results to the same narrowness. */
2749 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2750 (unoptab == neg_optab
2751 || unoptab == one_cmpl_optab)
2752 && class == MODE_INT);
2754 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2755 unsignedp);
2757 /* If we are generating clz using wider mode, adjust the
2758 result. */
2759 if (unoptab == clz_optab && temp != 0)
2760 temp = expand_binop (wider_mode, sub_optab, temp,
2761 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2762 - GET_MODE_BITSIZE (mode)),
2763 target, true, OPTAB_DIRECT);
2765 if (temp)
2767 if (class != MODE_INT)
2769 if (target == 0)
2770 target = gen_reg_rtx (mode);
2771 convert_move (target, temp, 0);
2772 return target;
2774 else
2775 return gen_lowpart (mode, temp);
2777 else
2778 delete_insns_since (last);
2783 /* One final attempt at implementing negation via subtraction,
2784 this time allowing widening of the operand. */
2785 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2787 rtx temp;
2788 temp = expand_binop (mode,
2789 unoptab == negv_optab ? subv_optab : sub_optab,
2790 CONST0_RTX (mode), op0,
2791 target, unsignedp, OPTAB_LIB_WIDEN);
2792 if (temp)
2793 return temp;
2796 return 0;
2799 /* Emit code to compute the absolute value of OP0, with result to
2800 TARGET if convenient. (TARGET may be 0.) The return value says
2801 where the result actually is to be found.
2803 MODE is the mode of the operand; the mode of the result is
2804 different but can be deduced from MODE.
2809 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2810 int result_unsignedp)
2812 rtx temp;
2814 if (! flag_trapv)
2815 result_unsignedp = 1;
2817 /* First try to do it with a special abs instruction. */
2818 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2819 op0, target, 0);
2820 if (temp != 0)
2821 return temp;
2823 /* For floating point modes, try clearing the sign bit. */
2824 if (SCALAR_FLOAT_MODE_P (mode))
2826 temp = expand_absneg_bit (ABS, mode, op0, target);
2827 if (temp)
2828 return temp;
2831 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2832 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2833 && !HONOR_SIGNED_ZEROS (mode))
2835 rtx last = get_last_insn ();
2837 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2838 if (temp != 0)
2839 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2840 OPTAB_WIDEN);
2842 if (temp != 0)
2843 return temp;
2845 delete_insns_since (last);
2848 /* If this machine has expensive jumps, we can do integer absolute
2849 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2850 where W is the width of MODE. */
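/* For a 32-bit int this is, roughly, the following branch-free C, relying
   (as the expansion below does) on an arithmetic right shift of the sign
   bit:

       int mask = x >> 31;          0 when x >= 0, -1 when x < 0
       int a = (x ^ mask) - mask;   x unchanged, or ~x + 1 == -x

   Like a plain negation, this wraps for the most negative value.  */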
2852 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2854 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2855 size_int (GET_MODE_BITSIZE (mode) - 1),
2856 NULL_RTX, 0);
2858 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2859 OPTAB_LIB_WIDEN);
2860 if (temp != 0)
2861 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2862 temp, extended, target, 0, OPTAB_LIB_WIDEN);
2864 if (temp != 0)
2865 return temp;
2868 return NULL_RTX;
2872 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2873 int result_unsignedp, int safe)
2875 rtx temp, op1;
2877 if (! flag_trapv)
2878 result_unsignedp = 1;
2880 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2881 if (temp != 0)
2882 return temp;
2884 /* If that does not win, use conditional jump and negate. */
2886 /* It is safe to use the target if it is the same
2887 as the source and is also a pseudo register. */
2888 if (op0 == target && REG_P (op0)
2889 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
2890 safe = 1;
2892 op1 = gen_label_rtx ();
2893 if (target == 0 || ! safe
2894 || GET_MODE (target) != mode
2895 || (MEM_P (target) && MEM_VOLATILE_P (target))
2896 || (REG_P (target)
2897 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2898 target = gen_reg_rtx (mode);
2900 emit_move_insn (target, op0);
2901 NO_DEFER_POP;
2903 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2904 NULL_RTX, NULL_RTX, op1);
2906 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2907 target, target, 0);
2908 if (op0 != target)
2909 emit_move_insn (target, op0);
2910 emit_label (op1);
2911 OK_DEFER_POP;
2912 return target;
2915 /* A subroutine of expand_copysign, perform the copysign operation using the
2916 abs and neg primitives advertised to exist on the target. The assumption
2917 is that we have a split register file, and leaving op0 in fp registers,
2918 and not playing with subregs so much, will help the register allocator. */
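/* A sketch of the abs/neg strategy, assuming IEEE double with the sign in
   bit 63; copysign_absneg_c is a hypothetical name:

       #include <math.h>
       #include <stdint.h>
       #include <string.h>

       double copysign_absneg_c (double x, double y)
       {
         uint64_t ybits;
         memcpy (&ybits, &y, sizeof ybits);
         x = fabs (x);             make the sign of x known to be clear
         if (ybits >> 63)          test only the sign bit of y
           x = -x;
         return x;
       }

   The expander below performs the conditional negation with a compare and a
   jump over a neg insn rather than with C control flow.  */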
2920 static rtx
2921 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2922 int bitpos, bool op0_is_abs)
2924 enum machine_mode imode;
2925 HOST_WIDE_INT hi, lo;
2926 int word;
2927 rtx label;
2929 if (target == op1)
2930 target = NULL_RTX;
2932 if (!op0_is_abs)
2934 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2935 if (op0 == NULL)
2936 return NULL_RTX;
2937 target = op0;
2939 else
2941 if (target == NULL_RTX)
2942 target = copy_to_reg (op0);
2943 else
2944 emit_move_insn (target, op0);
2947 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2949 imode = int_mode_for_mode (mode);
2950 if (imode == BLKmode)
2951 return NULL_RTX;
2952 op1 = gen_lowpart (imode, op1);
2954 else
2956 imode = word_mode;
2957 if (FLOAT_WORDS_BIG_ENDIAN)
2958 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2959 else
2960 word = bitpos / BITS_PER_WORD;
2961 bitpos = bitpos % BITS_PER_WORD;
2962 op1 = operand_subword_force (op1, word, mode);
2965 if (bitpos < HOST_BITS_PER_WIDE_INT)
2967 hi = 0;
2968 lo = (HOST_WIDE_INT) 1 << bitpos;
2970 else
2972 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2973 lo = 0;
2976 op1 = expand_binop (imode, and_optab, op1,
2977 immed_double_const (lo, hi, imode),
2978 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2980 label = gen_label_rtx ();
2981 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
2983 if (GET_CODE (op0) == CONST_DOUBLE)
2984 op0 = simplify_unary_operation (NEG, mode, op0, mode);
2985 else
2986 op0 = expand_unop (mode, neg_optab, op0, target, 0);
2987 if (op0 != target)
2988 emit_move_insn (target, op0);
2990 emit_label (label);
2992 return target;
2996 /* A subroutine of expand_copysign, perform the entire copysign operation
2997 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2998 is true if op0 is known to have its sign bit clear. */
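/* A sketch of the pure bitmask strategy, assuming IEEE single precision
   with the sign in bit 31; copysign_bits_c is a hypothetical name:

       #include <stdint.h>
       #include <string.h>

       float copysign_bits_c (float x, float y)
       {
         uint32_t xb, yb;
         memcpy (&xb, &x, sizeof xb);
         memcpy (&yb, &y, sizeof yb);
         xb = (xb & 0x7fffffffu) | (yb & 0x80000000u);
         memcpy (&x, &xb, sizeof xb);
         return x;
       }

   The AND keeps the magnitude of x, the OR brings in the sign of y.  When
   OP0_IS_ABS is known, the AND against 0x7fffffffu can be omitted, as in
   the code below.  */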
3000 static rtx
3001 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3002 int bitpos, bool op0_is_abs)
3004 enum machine_mode imode;
3005 HOST_WIDE_INT hi, lo;
3006 int word, nwords, i;
3007 rtx temp, insns;
3009 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3011 imode = int_mode_for_mode (mode);
3012 if (imode == BLKmode)
3013 return NULL_RTX;
3014 word = 0;
3015 nwords = 1;
3017 else
3019 imode = word_mode;
3021 if (FLOAT_WORDS_BIG_ENDIAN)
3022 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3023 else
3024 word = bitpos / BITS_PER_WORD;
3025 bitpos = bitpos % BITS_PER_WORD;
3026 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3029 if (bitpos < HOST_BITS_PER_WIDE_INT)
3031 hi = 0;
3032 lo = (HOST_WIDE_INT) 1 << bitpos;
3034 else
3036 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3037 lo = 0;
3040 if (target == 0 || target == op0 || target == op1)
3041 target = gen_reg_rtx (mode);
3043 if (nwords > 1)
3045 start_sequence ();
3047 for (i = 0; i < nwords; ++i)
3049 rtx targ_piece = operand_subword (target, i, 1, mode);
3050 rtx op0_piece = operand_subword_force (op0, i, mode);
3052 if (i == word)
3054 if (!op0_is_abs)
3055 op0_piece = expand_binop (imode, and_optab, op0_piece,
3056 immed_double_const (~lo, ~hi, imode),
3057 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3059 op1 = expand_binop (imode, and_optab,
3060 operand_subword_force (op1, i, mode),
3061 immed_double_const (lo, hi, imode),
3062 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3064 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3065 targ_piece, 1, OPTAB_LIB_WIDEN);
3066 if (temp != targ_piece)
3067 emit_move_insn (targ_piece, temp);
3069 else
3070 emit_move_insn (targ_piece, op0_piece);
3073 insns = get_insns ();
3074 end_sequence ();
3076 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
3078 else
3080 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3081 immed_double_const (lo, hi, imode),
3082 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3084 op0 = gen_lowpart (imode, op0);
3085 if (!op0_is_abs)
3086 op0 = expand_binop (imode, and_optab, op0,
3087 immed_double_const (~lo, ~hi, imode),
3088 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3090 temp = expand_binop (imode, ior_optab, op0, op1,
3091 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3092 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3095 return target;
3098 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3099 scalar floating point mode. Return NULL if we do not know how to
3100 expand the operation inline. */
3103 expand_copysign (rtx op0, rtx op1, rtx target)
3105 enum machine_mode mode = GET_MODE (op0);
3106 const struct real_format *fmt;
3107 bool op0_is_abs;
3108 rtx temp;
3110 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3111 gcc_assert (GET_MODE (op1) == mode);
3113 /* First try to do it with a special instruction. */
3114 temp = expand_binop (mode, copysign_optab, op0, op1,
3115 target, 0, OPTAB_DIRECT);
3116 if (temp)
3117 return temp;
3119 fmt = REAL_MODE_FORMAT (mode);
3120 if (fmt == NULL || !fmt->has_signed_zero)
3121 return NULL_RTX;
3123 op0_is_abs = false;
3124 if (GET_CODE (op0) == CONST_DOUBLE)
3126 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3127 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3128 op0_is_abs = true;
3131 if (fmt->signbit_ro >= 0
3132 && (GET_CODE (op0) == CONST_DOUBLE
3133 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
3134 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
3136 temp = expand_copysign_absneg (mode, op0, op1, target,
3137 fmt->signbit_ro, op0_is_abs);
3138 if (temp)
3139 return temp;
3142 if (fmt->signbit_rw < 0)
3143 return NULL_RTX;
3144 return expand_copysign_bit (mode, op0, op1, target,
3145 fmt->signbit_rw, op0_is_abs);
3148 /* Generate an instruction whose insn-code is INSN_CODE,
3149 with two operands: an output TARGET and an input OP0.
3150 TARGET *must* be nonzero, and the output is always stored there.
3151 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3152 the value that is stored into TARGET. */
3154 void
3155 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3157 rtx temp;
3158 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3159 rtx pat;
3161 temp = target;
3163 /* Now, if insn does not accept our operands, put them into pseudos. */
3165 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3166 op0 = copy_to_mode_reg (mode0, op0);
3168 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3169 temp = gen_reg_rtx (GET_MODE (temp));
3171 pat = GEN_FCN (icode) (temp, op0);
3173 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3174 add_equal_note (pat, temp, code, op0, NULL_RTX);
3176 emit_insn (pat);
3178 if (temp != target)
3179 emit_move_insn (target, temp);
3182 struct no_conflict_data
3184 rtx target, first, insn;
3185 bool must_stay;
3188 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3189 Set P->must_stay if the currently examined clobber / store has to stay
3190 in the list of insns that constitute the actual no_conflict block /
3191 libcall block. */
3192 static void
3193 no_conflict_move_test (rtx dest, rtx set, void *p0)
3195 struct no_conflict_data *p = p0;
3197 /* If this insn directly contributes to setting the target, it must stay. */
3198 if (reg_overlap_mentioned_p (p->target, dest))
3199 p->must_stay = true;
3200 /* If we haven't committed to keeping any other insns in the list yet,
3201 there is nothing more to check. */
3202 else if (p->insn == p->first)
3203 return;
3204 /* If this insn sets / clobbers a register that feeds one of the insns
3205 already in the list, this insn has to stay too. */
3206 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3207 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3208 || reg_used_between_p (dest, p->first, p->insn)
3209 /* Likewise if this insn depends on a register set by a previous
3210 insn in the list, or if it sets a result (presumably a hard
3211 register) that is set or clobbered by a previous insn.
3212 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3213 SET_DEST perform the former check on the address, and the latter
3214 check on the MEM. */
3215 || (GET_CODE (set) == SET
3216 && (modified_in_p (SET_SRC (set), p->first)
3217 || modified_in_p (SET_DEST (set), p->first)
3218 || modified_between_p (SET_SRC (set), p->first, p->insn)
3219 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3220 p->must_stay = true;
3223 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3224 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3225 is possible to do so. */
3227 static void
3228 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
3230 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3232 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3233 encapsulated region would not be in one basic block, i.e. when
3234 there is a control_flow_insn_p insn between FIRST and LAST. */
3235 bool attach_libcall_retval_notes = true;
3236 rtx insn, next = NEXT_INSN (last);
3238 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3239 if (control_flow_insn_p (insn))
3241 attach_libcall_retval_notes = false;
3242 break;
3245 if (attach_libcall_retval_notes)
3247 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3248 REG_NOTES (first));
3249 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3250 REG_NOTES (last));
3255 /* Emit code to perform a series of operations on a multi-word quantity, one
3256 word at a time.
3258 Such a block is preceded by a CLOBBER of the output, consists of multiple
3259 insns, each setting one word of the output, and followed by a SET copying
3260 the output to itself.
3262 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3263 note indicating that it doesn't conflict with the (also multi-word)
3264 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3265 notes.
3267 INSNS is a block of code generated to perform the operation, not including
3268 the CLOBBER and final copy. All insns that compute intermediate values
3269 are first emitted, followed by the block as described above.
3271 TARGET, OP0, and OP1 are the output and inputs of the operations,
3272 respectively. OP1 may be zero for a unary operation.
3274 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3275 on the last insn.
3277 If TARGET is not a register, INSNS is simply emitted with no special
3278 processing. Likewise if anything in INSNS is not an INSN or if
3279 there is a libcall block inside INSNS.
3281 The final insn emitted is returned. */
3284 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3286 rtx prev, next, first, last, insn;
3288 if (!REG_P (target) || reload_in_progress)
3289 return emit_insn (insns);
3290 else
3291 for (insn = insns; insn; insn = NEXT_INSN (insn))
3292 if (!NONJUMP_INSN_P (insn)
3293 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3294 return emit_insn (insns);
3296 /* First emit all insns that do not store into words of the output and remove
3297 these from the list. */
3298 for (insn = insns; insn; insn = next)
3300 rtx note;
3301 struct no_conflict_data data;
3303 next = NEXT_INSN (insn);
3305 /* Some ports (cris) create libcall regions on their own. We must
3306 avoid any potential nesting of LIBCALLs. */
3307 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3308 remove_note (insn, note);
3309 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3310 remove_note (insn, note);
3312 data.target = target;
3313 data.first = insns;
3314 data.insn = insn;
3315 data.must_stay = 0;
3316 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3317 if (! data.must_stay)
3319 if (PREV_INSN (insn))
3320 NEXT_INSN (PREV_INSN (insn)) = next;
3321 else
3322 insns = next;
3324 if (next)
3325 PREV_INSN (next) = PREV_INSN (insn);
3327 add_insn (insn);
3331 prev = get_last_insn ();
3333 /* Now write the CLOBBER of the output, followed by the setting of each
3334 of the words, followed by the final copy. */
3335 if (target != op0 && target != op1)
3336 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3338 for (insn = insns; insn; insn = next)
3340 next = NEXT_INSN (insn);
3341 add_insn (insn);
3343 if (op1 && REG_P (op1))
3344 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3345 REG_NOTES (insn));
3347 if (op0 && REG_P (op0))
3348 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3349 REG_NOTES (insn));
3352 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3353 != CODE_FOR_nothing)
3355 last = emit_move_insn (target, target);
3356 if (equiv)
3357 set_unique_reg_note (last, REG_EQUAL, equiv);
3359 else
3361 last = get_last_insn ();
3363 /* Remove any existing REG_EQUAL note from "last", or else it will
3364 be mistaken for a note referring to the full contents of the
3365 alleged libcall value when found together with the REG_RETVAL
3366 note added below. An existing note can come from an insn
3367 expansion at "last". */
3368 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3371 if (prev == 0)
3372 first = get_insns ();
3373 else
3374 first = NEXT_INSN (prev);
3376 maybe_encapsulate_block (first, last, equiv);
3378 return last;
3381 /* Emit code to make a call to a constant function or a library call.
3383 INSNS is a list containing all insns emitted in the call.
3384 These insns leave the result in RESULT. Our job is to copy RESULT
3385 to TARGET, which is logically equivalent to EQUIV.
3387 We first emit any insns that set a pseudo on the assumption that these are
3388 loading constants into registers; doing so allows them to be safely cse'ed
3389 between blocks. Then we emit all the other insns in the block, followed by
3390 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3391 note with an operand of EQUIV.
3393 Moving assignments to pseudos outside of the block is done to improve
3394 the generated code, but is not required to generate correct code,
3395 hence being unable to move an assignment is not grounds for not making
3396 a libcall block. There are two reasons why it is safe to leave these
3397 insns inside the block: First, we know that these pseudos cannot be
3398 used in generated RTL outside the block since they are created for
3399 temporary purposes within the block. Second, CSE will not record the
3400 values of anything set inside a libcall block, so we know they must
3401 be dead at the end of the block.
3403 Except for the first group of insns (the ones setting pseudos), the
3404 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3406 void
3407 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3409 rtx final_dest = target;
3410 rtx prev, next, first, last, insn;
3412 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3413 into a MEM later. Protect the libcall block from this change. */
3414 if (! REG_P (target) || REG_USERVAR_P (target))
3415 target = gen_reg_rtx (GET_MODE (target));
3417 /* If we're using non-call exceptions, a libcall corresponding to an
3418 operation that may trap may also trap. */
3419 if (flag_non_call_exceptions && may_trap_p (equiv))
3421 for (insn = insns; insn; insn = NEXT_INSN (insn))
3422 if (CALL_P (insn))
3424 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3426 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3427 remove_note (insn, note);
3430 else
3431 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3432 reg note to indicate that this call cannot throw or execute a nonlocal
3433 goto (unless there is already a REG_EH_REGION note, in which case
3434 we update it). */
3435 for (insn = insns; insn; insn = NEXT_INSN (insn))
3436 if (CALL_P (insn))
3438 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3440 if (note != 0)
3441 XEXP (note, 0) = constm1_rtx;
3442 else
3443 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3444 REG_NOTES (insn));
3447 /* First emit all insns that set pseudos. Remove them from the list as
3448 we go. Avoid insns that set pseudos which were referenced in previous
3449 insns. These can be generated by move_by_pieces, for example,
3450 to update an address. Similarly, avoid insns that reference things
3451 set in previous insns. */
3453 for (insn = insns; insn; insn = next)
3455 rtx set = single_set (insn);
3456 rtx note;
3458 /* Some ports (cris) create libcall regions on their own. We must
3459 avoid any potential nesting of LIBCALLs. */
3460 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3461 remove_note (insn, note);
3462 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3463 remove_note (insn, note);
3465 next = NEXT_INSN (insn);
3467 if (set != 0 && REG_P (SET_DEST (set))
3468 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3470 struct no_conflict_data data;
3472 data.target = const0_rtx;
3473 data.first = insns;
3474 data.insn = insn;
3475 data.must_stay = 0;
3476 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3477 if (! data.must_stay)
3479 if (PREV_INSN (insn))
3480 NEXT_INSN (PREV_INSN (insn)) = next;
3481 else
3482 insns = next;
3484 if (next)
3485 PREV_INSN (next) = PREV_INSN (insn);
3487 add_insn (insn);
3491 /* Some ports use a loop to copy large arguments onto the stack.
3492 Don't move anything outside such a loop. */
3493 if (LABEL_P (insn))
3494 break;
3497 prev = get_last_insn ();
3499 /* Write the remaining insns followed by the final copy. */
3501 for (insn = insns; insn; insn = next)
3503 next = NEXT_INSN (insn);
3505 add_insn (insn);
3508 last = emit_move_insn (target, result);
3509 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3510 != CODE_FOR_nothing)
3511 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3512 else
3514 /* Remove any existing REG_EQUAL note from "last", or else it will
3515 be mistaken for a note referring to the full contents of the
3516 libcall value when found together with the REG_RETVAL note added
3517 below. An existing note can come from an insn expansion at
3518 "last". */
3519 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3522 if (final_dest != target)
3523 emit_move_insn (final_dest, target);
3525 if (prev == 0)
3526 first = get_insns ();
3527 else
3528 first = NEXT_INSN (prev);
3530 maybe_encapsulate_block (first, last, equiv);
3533 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3534 PURPOSE describes how this comparison will be used. CODE is the rtx
3535 comparison code we will be using.
3537 ??? Actually, CODE is slightly weaker than that. A target is still
3538 required to implement all of the normal bcc operations, but not
3539 required to implement all (or any) of the unordered bcc operations. */
3542 can_compare_p (enum rtx_code code, enum machine_mode mode,
3543 enum can_compare_purpose purpose)
3547 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3549 if (purpose == ccp_jump)
3550 return bcc_gen_fctn[(int) code] != NULL;
3551 else if (purpose == ccp_store_flag)
3552 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3553 else
3554 /* There's only one cmov entry point, and it's allowed to fail. */
3555 return 1;
3557 if (purpose == ccp_jump
3558 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3559 return 1;
3560 if (purpose == ccp_cmov
3561 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3562 return 1;
3563 if (purpose == ccp_store_flag
3564 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3565 return 1;
3566 mode = GET_MODE_WIDER_MODE (mode);
3568 while (mode != VOIDmode);
3570 return 0;
3573 /* This function is called when we are going to emit a compare instruction that
3574 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3576 *PMODE is the mode of the inputs (in case they are const_int).
3577 *PUNSIGNEDP nonzero says that the operands are unsigned;
3578 this matters if they need to be widened.
3580 If they have mode BLKmode, then SIZE specifies the size of both operands.
3582 This function performs all the setup necessary so that the caller only has
3583 to emit a single comparison insn. This setup can involve doing a BLKmode
3584 comparison or emitting a library call to perform the comparison if no insn
3585 is available to handle it.
3586 The values which are passed in through pointers can be modified; the caller
3587 should perform the comparison on the modified values. Constant
3588 comparisons must have already been folded. */
3590 static void
3591 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3592 enum machine_mode *pmode, int *punsignedp,
3593 enum can_compare_purpose purpose)
3595 enum machine_mode mode = *pmode;
3596 rtx x = *px, y = *py;
3597 int unsignedp = *punsignedp;
3599 /* If we are inside an appropriately-short loop and we are optimizing,
3600 force expensive constants into a register. */
3601 if (CONSTANT_P (x) && optimize
3602 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3603 x = force_reg (mode, x);
3605 if (CONSTANT_P (y) && optimize
3606 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3607 y = force_reg (mode, y);
3609 #ifdef HAVE_cc0
3610 /* Make sure we have a canonical comparison. The RTL
3611 documentation states that canonical comparisons are required only
3612 for targets which have cc0. */
3613 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3614 #endif
3616 /* Don't let both operands fail to indicate the mode. */
3617 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3618 x = force_reg (mode, x);
3620 /* Handle all BLKmode compares. */
3622 if (mode == BLKmode)
3624 enum machine_mode cmp_mode, result_mode;
3625 enum insn_code cmp_code;
3626 tree length_type;
3627 rtx libfunc;
3628 rtx result;
3629 rtx opalign
3630 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3632 gcc_assert (size);
3634 /* Try to use a memory block compare insn - either cmpstr
3635 or cmpmem will do. */
3636 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3637 cmp_mode != VOIDmode;
3638 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3640 cmp_code = cmpmem_optab[cmp_mode];
3641 if (cmp_code == CODE_FOR_nothing)
3642 cmp_code = cmpstr_optab[cmp_mode];
3643 if (cmp_code == CODE_FOR_nothing)
3644 cmp_code = cmpstrn_optab[cmp_mode];
3645 if (cmp_code == CODE_FOR_nothing)
3646 continue;
3648 /* Must make sure the size fits the insn's mode. */
3649 if ((GET_CODE (size) == CONST_INT
3650 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3651 || (GET_MODE_BITSIZE (GET_MODE (size))
3652 > GET_MODE_BITSIZE (cmp_mode)))
3653 continue;
3655 result_mode = insn_data[cmp_code].operand[0].mode;
3656 result = gen_reg_rtx (result_mode);
3657 size = convert_to_mode (cmp_mode, size, 1);
3658 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3660 *px = result;
3661 *py = const0_rtx;
3662 *pmode = result_mode;
3663 return;
3666 /* Otherwise call a library function, memcmp. */
3667 libfunc = memcmp_libfunc;
3668 length_type = sizetype;
3669 result_mode = TYPE_MODE (integer_type_node);
3670 cmp_mode = TYPE_MODE (length_type);
3671 size = convert_to_mode (TYPE_MODE (length_type), size,
3672 TYPE_UNSIGNED (length_type));
3674 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3675 result_mode, 3,
3676 XEXP (x, 0), Pmode,
3677 XEXP (y, 0), Pmode,
3678 size, cmp_mode);
3679 *px = result;
3680 *py = const0_rtx;
3681 *pmode = result_mode;
3682 return;
3685 /* Don't allow operands to the compare to trap, as that can put the
3686 compare and branch in different basic blocks. */
3687 if (flag_non_call_exceptions)
3689 if (may_trap_p (x))
3690 x = force_reg (mode, x);
3691 if (may_trap_p (y))
3692 y = force_reg (mode, y);
3695 *px = x;
3696 *py = y;
3697 if (can_compare_p (*pcomparison, mode, purpose))
3698 return;
3700 /* Handle a lib call just for the mode we are using. */
3702 if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
3704 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3705 rtx result;
3707 /* If we want unsigned, and this mode has a distinct unsigned
3708 comparison routine, use that. */
3709 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3710 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3712 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3713 word_mode, 2, x, mode, y, mode);
3715 /* There are two kinds of comparison routines. Biased routines
3716 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3717 of gcc expect that the comparison operation is equivalent
3718 to the modified comparison. For signed comparisons compare the
3719 result against 1 in the biased case, and zero in the unbiased
3720 case. For unsigned comparisons always compare against 1 after
3721 biasing the unbiased result by adding 1. This gives us a way to
3722 represent LTU. */
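/* Concretely: suppose X = 5 and Y = 7 and the libcall is a biased routine
   in the style of libgcc's __cmpdi2, returning 0 (less), 1 (equal) or
   2 (greater).  It returns 0 here, and the original comparison code is now
   applied to (0, 1): 0 < 1 holds exactly when 5 < 7 does.  With an unbiased
   routine returning -1/0/1, a signed caller compares against 0 instead,
   while an unsigned caller first adds 1 so that LTU becomes
   (result + 1) LTU 1, which is true only for result == -1.  */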
3723 *px = result;
3724 *pmode = word_mode;
3725 *py = const1_rtx;
3727 if (!TARGET_LIB_INT_CMP_BIASED)
3729 if (*punsignedp)
3730 *px = plus_constant (result, 1);
3731 else
3732 *py = const0_rtx;
3734 return;
3737 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3738 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3741 /* Before emitting an insn with code ICODE, make sure that X, which is going
3742 to be used for operand OPNUM of the insn, is converted from mode MODE to
3743 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3744 that it is accepted by the operand predicate. Return the new value. */
3746 static rtx
3747 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3748 enum machine_mode wider_mode, int unsignedp)
3750 if (mode != wider_mode)
3751 x = convert_modes (wider_mode, mode, x, unsignedp);
3753 if (!insn_data[icode].operand[opnum].predicate
3754 (x, insn_data[icode].operand[opnum].mode))
3756 if (no_new_pseudos)
3757 return NULL_RTX;
3758 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3761 return x;
3764 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3765 we can do the comparison.
3766 The arguments are the same as for emit_cmp_and_jump_insns, but LABEL may
3767 be NULL_RTX which indicates that only a comparison is to be generated. */
3769 static void
3770 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3771 enum rtx_code comparison, int unsignedp, rtx label)
3773 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3774 enum mode_class class = GET_MODE_CLASS (mode);
3775 enum machine_mode wider_mode = mode;
3777 /* Try combined insns first. */
3780 enum insn_code icode;
3781 PUT_MODE (test, wider_mode);
3783 if (label)
3785 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3787 if (icode != CODE_FOR_nothing
3788 && insn_data[icode].operand[0].predicate (test, wider_mode))
3790 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3791 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3792 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3793 return;
3797 /* Handle some compares against zero. */
3798 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3799 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3801 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3802 emit_insn (GEN_FCN (icode) (x));
3803 if (label)
3804 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3805 return;
3808 /* Handle compares for which there is a directly suitable insn. */
3810 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3811 if (icode != CODE_FOR_nothing)
3813 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3814 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3815 emit_insn (GEN_FCN (icode) (x, y));
3816 if (label)
3817 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3818 return;
3821 if (!CLASS_HAS_WIDER_MODES_P (class))
3822 break;
3824 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3826 while (wider_mode != VOIDmode);
3828 gcc_unreachable ();
3831 /* Generate code to compare X with Y so that the condition codes are
3832 set and to jump to LABEL if the condition is true. If X is a
3833 constant and Y is not a constant, then the comparison is swapped to
3834 ensure that the comparison RTL has the canonical form.
3836 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3837 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3838 the proper branch condition code.
3840 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3842 MODE is the mode of the inputs (in case they are const_int).
3844 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3845 be passed unchanged to emit_cmp_insn, then potentially converted into an
3846 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
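/* A typical call, of the kind expand_float makes below, is

       emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX,
                                GET_MODE (from), 0, neglabel);

   which branches to NEGLABEL when FROM is negative and falls through
   otherwise.  */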
3848 void
3849 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3850 enum machine_mode mode, int unsignedp, rtx label)
3852 rtx op0 = x, op1 = y;
3854 /* Swap operands and condition to ensure canonical RTL. */
3855 if (swap_commutative_operands_p (x, y))
3857 /* If we're not emitting a branch, this means some caller
3858 is out of sync. */
3859 gcc_assert (label);
3861 op0 = y, op1 = x;
3862 comparison = swap_condition (comparison);
3865 #ifdef HAVE_cc0
3866 /* If OP0 is still a constant, then both X and Y must be constants.
3867 Force X into a register to create canonical RTL. */
3868 if (CONSTANT_P (op0))
3869 op0 = force_reg (mode, op0);
3870 #endif
3872 if (unsignedp)
3873 comparison = unsigned_condition (comparison);
3875 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3876 ccp_jump);
3877 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3880 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3882 void
3883 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3884 enum machine_mode mode, int unsignedp)
3886 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3889 /* Emit a library call comparison between floating point X and Y.
3890 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3892 static void
3893 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3894 enum machine_mode *pmode, int *punsignedp)
3896 enum rtx_code comparison = *pcomparison;
3897 enum rtx_code swapped = swap_condition (comparison);
3898 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3899 rtx x = *px;
3900 rtx y = *py;
3901 enum machine_mode orig_mode = GET_MODE (x);
3902 enum machine_mode mode;
3903 rtx value, target, insns, equiv;
3904 rtx libfunc = 0;
3905 bool reversed_p = false;
3907 for (mode = orig_mode;
3908 mode != VOIDmode;
3909 mode = GET_MODE_WIDER_MODE (mode))
3911 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3912 break;
3914 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3916 rtx tmp;
3917 tmp = x; x = y; y = tmp;
3918 comparison = swapped;
3919 break;
3922 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3923 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3925 comparison = reversed;
3926 reversed_p = true;
3927 break;
3931 gcc_assert (mode != VOIDmode);
3933 if (mode != orig_mode)
3935 x = convert_to_mode (mode, x, 0);
3936 y = convert_to_mode (mode, y, 0);
3939 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3940 the RTL. This allows the RTL optimizers to delete the libcall if the
3941 condition can be determined at compile-time. */
3942 if (comparison == UNORDERED)
3944 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3945 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3946 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3947 temp, const_true_rtx, equiv);
3949 else
3951 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
3952 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3954 rtx true_rtx, false_rtx;
3956 switch (comparison)
3958 case EQ:
3959 true_rtx = const0_rtx;
3960 false_rtx = const_true_rtx;
3961 break;
3963 case NE:
3964 true_rtx = const_true_rtx;
3965 false_rtx = const0_rtx;
3966 break;
3968 case GT:
3969 true_rtx = const1_rtx;
3970 false_rtx = const0_rtx;
3971 break;
3973 case GE:
3974 true_rtx = const0_rtx;
3975 false_rtx = constm1_rtx;
3976 break;
3978 case LT:
3979 true_rtx = constm1_rtx;
3980 false_rtx = const0_rtx;
3981 break;
3983 case LE:
3984 true_rtx = const0_rtx;
3985 false_rtx = const1_rtx;
3986 break;
3988 default:
3989 gcc_unreachable ();
3991 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3992 equiv, true_rtx, false_rtx);
3996 start_sequence ();
3997 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3998 word_mode, 2, x, mode, y, mode);
3999 insns = get_insns ();
4000 end_sequence ();
4002 target = gen_reg_rtx (word_mode);
4003 emit_libcall_block (insns, target, value, equiv);
4005 if (comparison == UNORDERED
4006 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4007 comparison = reversed_p ? EQ : NE;
4009 *px = target;
4010 *py = const0_rtx;
4011 *pmode = word_mode;
4012 *pcomparison = comparison;
4013 *punsignedp = 0;
4016 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4018 void
4019 emit_indirect_jump (rtx loc)
4021 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4022 (loc, Pmode))
4023 loc = copy_to_mode_reg (Pmode, loc);
4025 emit_jump_insn (gen_indirect_jump (loc));
4026 emit_barrier ();
4029 #ifdef HAVE_conditional_move
4031 /* Emit a conditional move instruction if the machine supports one for that
4032 condition and machine mode.
4034 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4035 the mode to use should they be constants. If it is VOIDmode, they cannot
4036 both be constants.
4038 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4039 should be stored there. MODE is the mode to use should they be constants.
4040 If it is VOIDmode, they cannot both be constants.
4042 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4043 is not supported. */
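/* For instance, a caller wanting TARGET = (OP0 >= 0 ? A : B), where A and B
   stand for whatever values are to be selected, might write

       rtx res = emit_conditional_move (target, GE, op0, const0_rtx, mode,
                                        a, b, mode, 0);

   and must be prepared for RES to be NULL_RTX when movcc_gen_code has no
   entry for MODE or the comparison cannot be expressed.  */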
4046 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4047 enum machine_mode cmode, rtx op2, rtx op3,
4048 enum machine_mode mode, int unsignedp)
4050 rtx tem, subtarget, comparison, insn;
4051 enum insn_code icode;
4052 enum rtx_code reversed;
4054 /* If one operand is constant, make it the second one. Only do this
4055 if the other operand is not constant as well. */
4057 if (swap_commutative_operands_p (op0, op1))
4059 tem = op0;
4060 op0 = op1;
4061 op1 = tem;
4062 code = swap_condition (code);
4065 /* get_condition will prefer to generate LT and GT even if the old
4066 comparison was against zero, so undo that canonicalization here since
4067 comparisons against zero are cheaper. */
4068 if (code == LT && op1 == const1_rtx)
4069 code = LE, op1 = const0_rtx;
4070 else if (code == GT && op1 == constm1_rtx)
4071 code = GE, op1 = const0_rtx;
4073 if (cmode == VOIDmode)
4074 cmode = GET_MODE (op0);
4076 if (swap_commutative_operands_p (op2, op3)
4077 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4078 != UNKNOWN))
4080 tem = op2;
4081 op2 = op3;
4082 op3 = tem;
4083 code = reversed;
4086 if (mode == VOIDmode)
4087 mode = GET_MODE (op2);
4089 icode = movcc_gen_code[mode];
4091 if (icode == CODE_FOR_nothing)
4092 return 0;
4094 if (!target)
4095 target = gen_reg_rtx (mode);
4097 subtarget = target;
4099 /* If the insn doesn't accept these operands, put them in pseudos. */
4101 if (!insn_data[icode].operand[0].predicate
4102 (subtarget, insn_data[icode].operand[0].mode))
4103 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4105 if (!insn_data[icode].operand[2].predicate
4106 (op2, insn_data[icode].operand[2].mode))
4107 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4109 if (!insn_data[icode].operand[3].predicate
4110 (op3, insn_data[icode].operand[3].mode))
4111 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4113 /* Everything should now be in the suitable form, so emit the compare insn
4114 and then the conditional move. */
4116 comparison
4117 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4119 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4120 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4121 return NULL and let the caller figure out how best to deal with this
4122 situation. */
4123 if (GET_CODE (comparison) != code)
4124 return NULL_RTX;
4126 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4128 /* If that failed, then give up. */
4129 if (insn == 0)
4130 return 0;
4132 emit_insn (insn);
4134 if (subtarget != target)
4135 convert_move (target, subtarget, 0);
4137 return target;
4140 /* Return nonzero if a conditional move of mode MODE is supported.
4142 This function is for combine so it can tell whether an insn that looks
4143 like a conditional move is actually supported by the hardware. If we
4144 guess wrong we lose a bit on optimization, but that's it. */
4145 /* ??? sparc64 supports conditionally moving integer values based on fp
4146 comparisons, and vice versa. How do we handle them? */
4149 can_conditionally_move_p (enum machine_mode mode)
4151 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4152 return 1;
4154 return 0;
4157 #endif /* HAVE_conditional_move */
4159 /* Emit a conditional addition instruction if the machine supports one for that
4160 condition and machine mode.
4162 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4163 the mode to use should they be constants. If it is VOIDmode, they cannot
4164 both be constants.
4166 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4167 should be stored there. MODE is the mode to use should they be constants.
4168 If it is VOIDmode, they cannot both be constants.
4170 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4171 is not supported. */
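/* For instance, TARGET = (OP0 == 0 ? OP2 : OP2 + OP3) could be requested as

       rtx res = emit_conditional_add (target, EQ, op0, const0_rtx, mode,
                                       op2, op3, mode, 0);

   with NULL_RTX coming back if addcc_optab has no handler for MODE.  */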
4174 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4175 enum machine_mode cmode, rtx op2, rtx op3,
4176 enum machine_mode mode, int unsignedp)
4178 rtx tem, subtarget, comparison, insn;
4179 enum insn_code icode;
4180 enum rtx_code reversed;
4182 /* If one operand is constant, make it the second one. Only do this
4183 if the other operand is not constant as well. */
4185 if (swap_commutative_operands_p (op0, op1))
4187 tem = op0;
4188 op0 = op1;
4189 op1 = tem;
4190 code = swap_condition (code);
4193 /* get_condition will prefer to generate LT and GT even if the old
4194 comparison was against zero, so undo that canonicalization here since
4195 comparisons against zero are cheaper. */
4196 if (code == LT && op1 == const1_rtx)
4197 code = LE, op1 = const0_rtx;
4198 else if (code == GT && op1 == constm1_rtx)
4199 code = GE, op1 = const0_rtx;
4201 if (cmode == VOIDmode)
4202 cmode = GET_MODE (op0);
4204 if (swap_commutative_operands_p (op2, op3)
4205 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4206 != UNKNOWN))
4208 tem = op2;
4209 op2 = op3;
4210 op3 = tem;
4211 code = reversed;
4214 if (mode == VOIDmode)
4215 mode = GET_MODE (op2);
4217 icode = addcc_optab->handlers[(int) mode].insn_code;
4219 if (icode == CODE_FOR_nothing)
4220 return 0;
4222 if (!target)
4223 target = gen_reg_rtx (mode);
4225 /* If the insn doesn't accept these operands, put them in pseudos. */
4227 if (!insn_data[icode].operand[0].predicate
4228 (target, insn_data[icode].operand[0].mode))
4229 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4230 else
4231 subtarget = target;
4233 if (!insn_data[icode].operand[2].predicate
4234 (op2, insn_data[icode].operand[2].mode))
4235 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4237 if (!insn_data[icode].operand[3].predicate
4238 (op3, insn_data[icode].operand[3].mode))
4239 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4241 /* Everything should now be in the suitable form, so emit the compare insn
4242 and then the conditional add. */
4244 comparison
4245 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4247 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4248 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4249 return NULL and let the caller figure out how best to deal with this
4250 situation. */
4251 if (GET_CODE (comparison) != code)
4252 return NULL_RTX;
4254 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4256 /* If that failed, then give up. */
4257 if (insn == 0)
4258 return 0;
4260 emit_insn (insn);
4262 if (subtarget != target)
4263 convert_move (target, subtarget, 0);
4265 return target;
4268 /* These functions attempt to generate an insn body, rather than
4269 emitting the insn, but if the gen function already emits them, we
4270 make no attempt to turn them back into naked patterns. */
4272 /* Generate and return an insn body to add Y to X. */
4275 gen_add2_insn (rtx x, rtx y)
4277 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4279 gcc_assert (insn_data[icode].operand[0].predicate
4280 (x, insn_data[icode].operand[0].mode));
4281 gcc_assert (insn_data[icode].operand[1].predicate
4282 (x, insn_data[icode].operand[1].mode));
4283 gcc_assert (insn_data[icode].operand[2].predicate
4284 (y, insn_data[icode].operand[2].mode));
4286 return GEN_FCN (icode) (x, x, y);
4289 /* Generate and return an insn body to add r1 and c,
4290 storing the result in r0. */
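/* Unlike gen_add2_insn this can fail, so an illustrative caller is

       rtx insn = gen_add3_insn (r0, r1, c);
       if (insn)
         emit_insn (insn);
       else
         ... fall back to expand_binop or the like ...

   rather than assuming the target has a three-operand add pattern that
   accepts these particular operands.  */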
4292 gen_add3_insn (rtx r0, rtx r1, rtx c)
4294 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4296 if (icode == CODE_FOR_nothing
4297 || !(insn_data[icode].operand[0].predicate
4298 (r0, insn_data[icode].operand[0].mode))
4299 || !(insn_data[icode].operand[1].predicate
4300 (r1, insn_data[icode].operand[1].mode))
4301 || !(insn_data[icode].operand[2].predicate
4302 (c, insn_data[icode].operand[2].mode)))
4303 return NULL_RTX;
4305 return GEN_FCN (icode) (r0, r1, c);
4309 have_add2_insn (rtx x, rtx y)
4311 int icode;
4313 gcc_assert (GET_MODE (x) != VOIDmode);
4315 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4317 if (icode == CODE_FOR_nothing)
4318 return 0;
4320 if (!(insn_data[icode].operand[0].predicate
4321 (x, insn_data[icode].operand[0].mode))
4322 || !(insn_data[icode].operand[1].predicate
4323 (x, insn_data[icode].operand[1].mode))
4324 || !(insn_data[icode].operand[2].predicate
4325 (y, insn_data[icode].operand[2].mode)))
4326 return 0;
4328 return 1;
4331 /* Generate and return an insn body to subtract Y from X. */
4334 gen_sub2_insn (rtx x, rtx y)
4336 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4338 gcc_assert (insn_data[icode].operand[0].predicate
4339 (x, insn_data[icode].operand[0].mode));
4340 gcc_assert (insn_data[icode].operand[1].predicate
4341 (x, insn_data[icode].operand[1].mode));
4342 gcc_assert (insn_data[icode].operand[2].predicate
4343 (y, insn_data[icode].operand[2].mode));
4345 return GEN_FCN (icode) (x, x, y);
4348 /* Generate and return an insn body to subtract c from r1,
4349 storing the result in r0. */
4351 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4353 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4355 if (icode == CODE_FOR_nothing
4356 || !(insn_data[icode].operand[0].predicate
4357 (r0, insn_data[icode].operand[0].mode))
4358 || !(insn_data[icode].operand[1].predicate
4359 (r1, insn_data[icode].operand[1].mode))
4360 || !(insn_data[icode].operand[2].predicate
4361 (c, insn_data[icode].operand[2].mode)))
4362 return NULL_RTX;
4364 return GEN_FCN (icode) (r0, r1, c);
4368 have_sub2_insn (rtx x, rtx y)
4370 int icode;
4372 gcc_assert (GET_MODE (x) != VOIDmode);
4374 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4376 if (icode == CODE_FOR_nothing)
4377 return 0;
4379 if (!(insn_data[icode].operand[0].predicate
4380 (x, insn_data[icode].operand[0].mode))
4381 || !(insn_data[icode].operand[1].predicate
4382 (x, insn_data[icode].operand[1].mode))
4383 || !(insn_data[icode].operand[2].predicate
4384 (y, insn_data[icode].operand[2].mode)))
4385 return 0;
4387 return 1;
4390 /* Generate the body of an instruction to copy Y into X.
4391 It may be a list of insns, if one insn isn't enough. */
4394 gen_move_insn (rtx x, rtx y)
4396 rtx seq;
4398 start_sequence ();
4399 emit_move_insn_1 (x, y);
4400 seq = get_insns ();
4401 end_sequence ();
4402 return seq;
4405 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4406 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4407 no such operation exists, CODE_FOR_nothing will be returned. */
4409 enum insn_code
4410 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4411 int unsignedp)
4413 convert_optab tab;
4414 #ifdef HAVE_ptr_extend
4415 if (unsignedp < 0)
4416 return CODE_FOR_ptr_extend;
4417 #endif
4419 tab = unsignedp ? zext_optab : sext_optab;
4420 return tab->handlers[to_mode][from_mode].insn_code;
4423 /* Generate the body of an insn to extend Y (with mode MFROM)
4424 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4427 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4428 enum machine_mode mfrom, int unsignedp)
4430 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4431 return GEN_FCN (icode) (x, y);
4434 /* can_fix_p and can_float_p say whether the target machine
4435 can directly convert a given fixed point type to
4436 a given floating point type, or vice versa.
4437 The returned value is the CODE_FOR_... value to use,
4438 or CODE_FOR_nothing if these modes cannot be directly converted.
4440 *TRUNCP_PTR is set to 1 if it is necessary to output
4441 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4443 static enum insn_code
4444 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4445 int unsignedp, int *truncp_ptr)
4447 convert_optab tab;
4448 enum insn_code icode;
4450 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4451 icode = tab->handlers[fixmode][fltmode].insn_code;
4452 if (icode != CODE_FOR_nothing)
4454 *truncp_ptr = 0;
4455 return icode;
4458 /* FIXME: This requires a port to define both FIX and FTRUNC patterns
4459 for this to work. We need to rework the fix* and ftrunc* patterns
4460 and documentation. */
4461 tab = unsignedp ? ufix_optab : sfix_optab;
4462 icode = tab->handlers[fixmode][fltmode].insn_code;
4463 if (icode != CODE_FOR_nothing
4464 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4466 *truncp_ptr = 1;
4467 return icode;
4470 *truncp_ptr = 0;
4471 return CODE_FOR_nothing;
4474 static enum insn_code
4475 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4476 int unsignedp)
4478 convert_optab tab;
4480 tab = unsignedp ? ufloat_optab : sfloat_optab;
4481 return tab->handlers[fltmode][fixmode].insn_code;
4484 /* Generate code to convert FROM to floating point
4485 and store in TO. FROM must be fixed point and not VOIDmode.
4486 UNSIGNEDP nonzero means regard FROM as unsigned.
4487 Normally this is done by correcting the final value
4488 if it is negative. */
4490 void
4491 expand_float (rtx to, rtx from, int unsignedp)
4493 enum insn_code icode;
4494 rtx target = to;
4495 enum machine_mode fmode, imode;
4496 bool can_do_signed = false;
4498 /* Crash now, because we won't be able to decide which mode to use. */
4499 gcc_assert (GET_MODE (from) != VOIDmode);
4501 /* Look for an insn to do the conversion. Do it in the specified
4502 modes if possible; otherwise convert either input, output or both to
4503 wider mode. If the integer mode is wider than the mode of FROM,
4504 we can do the conversion signed even if the input is unsigned. */
4506 for (fmode = GET_MODE (to); fmode != VOIDmode;
4507 fmode = GET_MODE_WIDER_MODE (fmode))
4508 for (imode = GET_MODE (from); imode != VOIDmode;
4509 imode = GET_MODE_WIDER_MODE (imode))
4511 int doing_unsigned = unsignedp;
4513 if (fmode != GET_MODE (to)
4514 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4515 continue;
4517 icode = can_float_p (fmode, imode, unsignedp);
4518 if (icode == CODE_FOR_nothing && unsignedp)
4520 enum insn_code scode = can_float_p (fmode, imode, 0);
4521 if (scode != CODE_FOR_nothing)
4522 can_do_signed = true;
4523 if (imode != GET_MODE (from))
4524 icode = scode, doing_unsigned = 0;
4527 if (icode != CODE_FOR_nothing)
4529 if (imode != GET_MODE (from))
4530 from = convert_to_mode (imode, from, unsignedp);
4532 if (fmode != GET_MODE (to))
4533 target = gen_reg_rtx (fmode);
4535 emit_unop_insn (icode, target, from,
4536 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4538 if (target != to)
4539 convert_move (to, target, 0);
4540 return;
4544 /* Unsigned integer, and no way to convert directly. For binary
4545 floating point modes, convert as signed, then conditionally adjust
4546 the result. */
4547 if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
4549 rtx label = gen_label_rtx ();
4550 rtx temp;
4551 REAL_VALUE_TYPE offset;
4553 /* Look for a usable floating mode FMODE wider than the source and at
4554 least as wide as the target. Using FMODE will avoid rounding woes
4555 with unsigned values greater than the signed maximum value. */
4557 for (fmode = GET_MODE (to); fmode != VOIDmode;
4558 fmode = GET_MODE_WIDER_MODE (fmode))
4559 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4560 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4561 break;
4563 if (fmode == VOIDmode)
4565 /* There is no such mode. Pretend the target is wide enough. */
4566 fmode = GET_MODE (to);
4568 /* Avoid double-rounding when TO is narrower than FROM. */
4569 if ((significand_size (fmode) + 1)
4570 < GET_MODE_BITSIZE (GET_MODE (from)))
4572 rtx temp1;
4573 rtx neglabel = gen_label_rtx ();
4575 /* Don't use TARGET if it isn't a register, is a hard register,
4576 or is the wrong mode. */
4577 if (!REG_P (target)
4578 || REGNO (target) < FIRST_PSEUDO_REGISTER
4579 || GET_MODE (target) != fmode)
4580 target = gen_reg_rtx (fmode);
4582 imode = GET_MODE (from);
4583 do_pending_stack_adjust ();
4585 /* Test whether the sign bit is set. */
4586 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4587 0, neglabel);
4589 /* The sign bit is not set. Convert as signed. */
4590 expand_float (target, from, 0);
4591 emit_jump_insn (gen_jump (label));
4592 emit_barrier ();
4594 /* The sign bit is set.
4595 Convert to a usable (positive signed) value by shifting right
4596 one bit, while remembering if a nonzero bit was shifted
4597 out; i.e., compute (from & 1) | (from >> 1). */
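/* For example, when converting the DImode value 2^64 - 1 to SFmode:
   (from & 1) | (from >> 1) is 2^63 - 1, which is positive and can be
   converted with the signed insn; doubling the converted result then
   yields, to SFmode precision, 2^64.  The OR keeps the discarded low bit
   as a "sticky" bit so the halved value is not rounded as if that bit
   had been zero.  */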
4599 emit_label (neglabel);
4600 temp = expand_binop (imode, and_optab, from, const1_rtx,
4601 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4602 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4603 NULL_RTX, 1);
4604 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4605 OPTAB_LIB_WIDEN);
4606 expand_float (target, temp, 0);
4608 /* Multiply by 2 to undo the shift above. */
4609 temp = expand_binop (fmode, add_optab, target, target,
4610 target, 0, OPTAB_LIB_WIDEN);
4611 if (temp != target)
4612 emit_move_insn (target, temp);
4614 do_pending_stack_adjust ();
4615 emit_label (label);
4616 goto done;
4620 /* If we are about to do some arithmetic to correct for an
4621 unsigned operand, do it in a pseudo-register. */
4623 if (GET_MODE (to) != fmode
4624 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4625 target = gen_reg_rtx (fmode);
4627 /* Convert as signed integer to floating. */
4628 expand_float (target, from, 0);
4630 /* If FROM is negative (and therefore TO is negative),
4631 correct its value by 2**bitwidth. */
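/* For example, with a 32-bit FROM holding the unsigned value 3000000000:
   read as signed it is 3000000000 - 2^32 = -1294967296, so the conversion
   above produced -1294967296.0, and adding 2^32 below restores
   3000000000.0.  */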
4633 do_pending_stack_adjust ();
4634 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4635 0, label);
4638 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4639 temp = expand_binop (fmode, add_optab, target,
4640 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4641 target, 0, OPTAB_LIB_WIDEN);
4642 if (temp != target)
4643 emit_move_insn (target, temp);
4645 do_pending_stack_adjust ();
4646 emit_label (label);
4647 goto done;
4650 /* No hardware instruction available; call a library routine. */
4652 rtx libfunc;
4653 rtx insns;
4654 rtx value;
4655 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4657 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4658 from = convert_to_mode (SImode, from, unsignedp);
4660 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4661 gcc_assert (libfunc);
4663 start_sequence ();
4665 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4666 GET_MODE (to), 1, from,
4667 GET_MODE (from));
4668 insns = get_insns ();
4669 end_sequence ();
4671 emit_libcall_block (insns, target, value,
4672 gen_rtx_FLOAT (GET_MODE (to), from));
4675 done:
4677 /* Copy result to requested destination
4678 if we have been computing in a temp location. */
4680 if (target != to)
4682 if (GET_MODE (target) == GET_MODE (to))
4683 emit_move_insn (to, target);
4684 else
4685 convert_move (to, target, 0);
4689 /* Generate code to convert FROM to fixed point and store in TO. FROM
4690 must be floating point. */
4692 void
4693 expand_fix (rtx to, rtx from, int unsignedp)
4695 enum insn_code icode;
4696 rtx target = to;
4697 enum machine_mode fmode, imode;
4698 int must_trunc = 0;
4700 /* We first try to find a pair of modes, one real and one integer, at
4701 least as wide as FROM and TO, respectively, in which we can open-code
4702 this conversion. If the integer mode is wider than the mode of TO,
4703 we can do the conversion either signed or unsigned. */
4705 for (fmode = GET_MODE (from); fmode != VOIDmode;
4706 fmode = GET_MODE_WIDER_MODE (fmode))
4707 for (imode = GET_MODE (to); imode != VOIDmode;
4708 imode = GET_MODE_WIDER_MODE (imode))
4710 int doing_unsigned = unsignedp;
4712 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4713 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4714 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4716 if (icode != CODE_FOR_nothing)
4718 if (fmode != GET_MODE (from))
4719 from = convert_to_mode (fmode, from, 0);
4721 if (must_trunc)
4723 rtx temp = gen_reg_rtx (GET_MODE (from));
4724 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4725 temp, 0);
4728 if (imode != GET_MODE (to))
4729 target = gen_reg_rtx (imode);
4731 emit_unop_insn (icode, target, from,
4732 doing_unsigned ? UNSIGNED_FIX : FIX);
4733 if (target != to)
4734 convert_move (to, target, unsignedp);
4735 return;
4739 /* For an unsigned conversion, there is one more way to do it.
4740 If we have a signed conversion insn, we generate code that compares
4741 the real value to the largest representable positive number. If it
4742 is smaller, the conversion is done normally. Otherwise, subtract
4743 one plus the highest signed number, convert, and add it back.
4745 We only need to check all real modes, since we know we didn't find
4746 anything with a wider integer mode.
4748 This code used to extend the FP value into a mode wider than the destination.
4749 This is not needed. Consider, for instance, conversion from SFmode
4750 into DImode.
4752 The hot path through the code deals with inputs smaller than 2^63
4753 and does just the conversion, so there are no bits to lose.
4755 In the other path we know the value is positive and in the range 2^63..2^64-1
4756 inclusive (for any other input, overflow happens and the result is undefined).
4757 So we know that the most significant bit set in the mantissa corresponds to
4758 2^63. The subtraction of 2^63 should not generate any rounding as it
4759 simply clears out that bit. The rest is trivial. */
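/* For example, fixing 3000000000.0 to a 32-bit unsigned integer: the value
   is >= 2^31, so we compute 3000000000.0 - 2147483648.0 = 852516352.0,
   do a signed fix to get 852516352, and XOR in the (now clear) 2^31 bit
   to recover 3000000000.  */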
4761 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4762 for (fmode = GET_MODE (from); fmode != VOIDmode;
4763 fmode = GET_MODE_WIDER_MODE (fmode))
4764 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4765 &must_trunc))
4767 int bitsize;
4768 REAL_VALUE_TYPE offset;
4769 rtx limit, lab1, lab2, insn;
4771 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4772 real_2expN (&offset, bitsize - 1);
4773 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4774 lab1 = gen_label_rtx ();
4775 lab2 = gen_label_rtx ();
4777 if (fmode != GET_MODE (from))
4778 from = convert_to_mode (fmode, from, 0);
4780 /* See if we need to do the subtraction. */
4781 do_pending_stack_adjust ();
4782 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4783 0, lab1);
4785 /* If not, do the signed "fix" and branch around fixup code. */
4786 expand_fix (to, from, 0);
4787 emit_jump_insn (gen_jump (lab2));
4788 emit_barrier ();
4790 /* Otherwise, subtract 2**(N-1), convert to signed number,
4791 then add 2**(N-1). Do the addition using XOR since this
4792 will often generate better code. */
4793 emit_label (lab1);
4794 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4795 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4796 expand_fix (to, target, 0);
4797 target = expand_binop (GET_MODE (to), xor_optab, to,
4798 gen_int_mode
4799 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4800 GET_MODE (to)),
4801 to, 1, OPTAB_LIB_WIDEN);
4803 if (target != to)
4804 emit_move_insn (to, target);
4806 emit_label (lab2);
4808 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4809 != CODE_FOR_nothing)
4811 /* Make a place for a REG_NOTE and add it. */
4812 insn = emit_move_insn (to, to);
4813 set_unique_reg_note (insn,
4814 REG_EQUAL,
4815 gen_rtx_fmt_e (UNSIGNED_FIX,
4816 GET_MODE (to),
4817 copy_rtx (from)));
4820 return;
4823 /* We can't do it with an insn, so use a library call. But first ensure
4824 that the mode of TO is at least as wide as SImode, since those are the
4825 only library calls we know about. */
4827 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4829 target = gen_reg_rtx (SImode);
4831 expand_fix (target, from, unsignedp);
4833 else
4835 rtx insns;
4836 rtx value;
4837 rtx libfunc;
4839 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4840 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4841 gcc_assert (libfunc);
4843 start_sequence ();
4845 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4846 GET_MODE (to), 1, from,
4847 GET_MODE (from));
4848 insns = get_insns ();
4849 end_sequence ();
4851 emit_libcall_block (insns, target, value,
4852 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4853 GET_MODE (to), from));
4856 if (target != to)
4858 if (GET_MODE (to) == GET_MODE (target))
4859 emit_move_insn (to, target);
4860 else
4861 convert_move (to, target, 0);
4865 /* Report whether we have an instruction to perform the operation
4866 specified by CODE on operands of mode MODE. */
4868 have_insn_for (enum rtx_code code, enum machine_mode mode)
4870 return (code_to_optab[(int) code] != 0
4871 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4872 != CODE_FOR_nothing));
4875 /* Create a blank optab. */
4876 static optab
4877 new_optab (void)
4879 int i;
4880 optab op = ggc_alloc (sizeof (struct optab));
4881 for (i = 0; i < NUM_MACHINE_MODES; i++)
4883 op->handlers[i].insn_code = CODE_FOR_nothing;
4884 op->handlers[i].libfunc = 0;
4887 return op;
4890 static convert_optab
4891 new_convert_optab (void)
4893 int i, j;
4894 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4895 for (i = 0; i < NUM_MACHINE_MODES; i++)
4896 for (j = 0; j < NUM_MACHINE_MODES; j++)
4898 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4899 op->handlers[i][j].libfunc = 0;
4901 return op;
4904 /* Same, but fill in its code as CODE, and write it into the
4905 code_to_optab table. */
4906 static inline optab
4907 init_optab (enum rtx_code code)
4909 optab op = new_optab ();
4910 op->code = code;
4911 code_to_optab[(int) code] = op;
4912 return op;
4915 /* Same, but fill in its code as CODE, and do _not_ write it into
4916 the code_to_optab table. */
4917 static inline optab
4918 init_optabv (enum rtx_code code)
4920 optab op = new_optab ();
4921 op->code = code;
4922 return op;
4925 /* Conversion optabs never go in the code_to_optab table. */
4926 static inline convert_optab
4927 init_convert_optab (enum rtx_code code)
4929 convert_optab op = new_convert_optab ();
4930 op->code = code;
4931 return op;
4934 /* Initialize the libfunc fields of an entire group of entries in some
4935 optab. Each entry is set equal to a string consisting of a leading
4936 pair of underscores followed by a generic operation name followed by
4937 a mode name (downshifted to lowercase) followed by a single character
4938 representing the number of operands for the given operation (which is
4939 usually one of the characters '2', '3', or '4').
4941 OPTABLE is the table in which libfunc fields are to be initialized.
4942 FIRST_MODE is the first machine mode index in the given optab to
4943 initialize.
4944 LAST_MODE is the last machine mode index in the given optab to
4945 initialize.
4946 OPNAME is the generic (string) name of the operation.
4947 SUFFIX is the character which specifies the number of operands for
4948 the given generic operation. */
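/* For example, an illustrative call such as
   init_libfuncs (add_optab, SImode, DImode, "add", '3') registers names
   like "__addsi3" for SImode and "__adddi3" for DImode, the usual libgcc
   spellings.  */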
4951 static void
4952 init_libfuncs (optab optable, int first_mode, int last_mode,
4953 const char *opname, int suffix)
4955 int mode;
4956 unsigned opname_len = strlen (opname);
4958 for (mode = first_mode; (int) mode <= (int) last_mode;
4959 mode = (enum machine_mode) ((int) mode + 1))
4961 const char *mname = GET_MODE_NAME (mode);
4962 unsigned mname_len = strlen (mname);
4963 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
4964 char *p;
4965 const char *q;
4967 p = libfunc_name;
4968 *p++ = '_';
4969 *p++ = '_';
4970 for (q = opname; *q; )
4971 *p++ = *q++;
4972 for (q = mname; *q; q++)
4973 *p++ = TOLOWER (*q);
4974 *p++ = suffix;
4975 *p = '\0';
4977 optable->handlers[(int) mode].libfunc
4978 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
4982 /* Initialize the libfunc fields of an entire group of entries in some
4983 optab which correspond to all integer mode operations. The parameters
4984 have the same meaning as similarly named ones for the `init_libfuncs'
4985 routine. (See above). */
4987 static void
4988 init_integral_libfuncs (optab optable, const char *opname, int suffix)
4990 int maxsize = 2*BITS_PER_WORD;
4991 if (maxsize < LONG_LONG_TYPE_SIZE)
4992 maxsize = LONG_LONG_TYPE_SIZE;
4993 init_libfuncs (optable, word_mode,
4994 mode_for_size (maxsize, MODE_INT, 0),
4995 opname, suffix);
4998 /* Initialize the libfunc fields of an entire group of entries in some
4999 optab which correspond to all real mode operations. The parameters
5000 have the same meaning as similarly named ones for the `init_libfuncs'
5001 routine. (See above). */
5003 static void
5004 init_floating_libfuncs (optab optable, const char *opname, int suffix)
5006 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
5007 init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
5008 opname, suffix);
5011 /* Initialize the libfunc fields of an entire group of entries of an
5012 inter-mode-class conversion optab. The string formation rules are
5013 similar to the ones for init_libfuncs, above, but instead of having
5014 a mode name and an operand count these functions have two mode names
5015 and no operand count. */
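/* For example, init_interclass_conv_libfuncs (sfloat_optab, "float",
   MODE_INT, MODE_FLOAT) below registers, among others, "__floatsisf" for
   the SImode-to-SFmode conversion: two underscores, the operation name,
   then the source and destination mode names.  */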
5016 static void
5017 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
5018 enum mode_class from_class,
5019 enum mode_class to_class)
5021 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
5022 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
5023 size_t opname_len = strlen (opname);
5024 size_t max_mname_len = 0;
5026 enum machine_mode fmode, tmode;
5027 const char *fname, *tname;
5028 const char *q;
5029 char *libfunc_name, *suffix;
5030 char *p;
5032 for (fmode = first_from_mode;
5033 fmode != VOIDmode;
5034 fmode = GET_MODE_WIDER_MODE (fmode))
5035 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
5037 for (tmode = first_to_mode;
5038 tmode != VOIDmode;
5039 tmode = GET_MODE_WIDER_MODE (tmode))
5040 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
5042 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5043 libfunc_name[0] = '_';
5044 libfunc_name[1] = '_';
5045 memcpy (&libfunc_name[2], opname, opname_len);
5046 suffix = libfunc_name + opname_len + 2;
5048 for (fmode = first_from_mode; fmode != VOIDmode;
5049 fmode = GET_MODE_WIDER_MODE (fmode))
5050 for (tmode = first_to_mode; tmode != VOIDmode;
5051 tmode = GET_MODE_WIDER_MODE (tmode))
5053 fname = GET_MODE_NAME (fmode);
5054 tname = GET_MODE_NAME (tmode);
5056 p = suffix;
5057 for (q = fname; *q; p++, q++)
5058 *p = TOLOWER (*q);
5059 for (q = tname; *q; p++, q++)
5060 *p = TOLOWER (*q);
5062 *p = '\0';
5064 tab->handlers[tmode][fmode].libfunc
5065 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5066 p - libfunc_name));
5070 /* Initialize the libfunc fields of an entire group of entries of an
5071 intra-mode-class conversion optab. The string formation rules are
5072 similar to the ones for init_libfuncs, above. WIDENING says whether
5073 the optab goes from narrow to wide modes or vice versa. These functions
5074 have two mode names _and_ an operand count. */
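/* For example, the calls below give sext_optab "__extendsfdf2" for the
   widening SFmode-to-DFmode conversion and trunc_optab "__truncdfsf2" for
   the narrowing direction; the narrower mode is named first when widening
   and second when narrowing.  */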
5075 static void
5076 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
5077 enum mode_class class, bool widening)
5079 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
5080 size_t opname_len = strlen (opname);
5081 size_t max_mname_len = 0;
5083 enum machine_mode nmode, wmode;
5084 const char *nname, *wname;
5085 const char *q;
5086 char *libfunc_name, *suffix;
5087 char *p;
5089 for (nmode = first_mode; nmode != VOIDmode;
5090 nmode = GET_MODE_WIDER_MODE (nmode))
5091 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
5093 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5094 libfunc_name[0] = '_';
5095 libfunc_name[1] = '_';
5096 memcpy (&libfunc_name[2], opname, opname_len);
5097 suffix = libfunc_name + opname_len + 2;
5099 for (nmode = first_mode; nmode != VOIDmode;
5100 nmode = GET_MODE_WIDER_MODE (nmode))
5101 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
5102 wmode = GET_MODE_WIDER_MODE (wmode))
5104 nname = GET_MODE_NAME (nmode);
5105 wname = GET_MODE_NAME (wmode);
5107 p = suffix;
5108 for (q = widening ? nname : wname; *q; p++, q++)
5109 *p = TOLOWER (*q);
5110 for (q = widening ? wname : nname; *q; p++, q++)
5111 *p = TOLOWER (*q);
5113 *p++ = '2';
5114 *p = '\0';
5116 tab->handlers[widening ? wmode : nmode]
5117 [widening ? nmode : wmode].libfunc
5118 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5119 p - libfunc_name));
5125 init_one_libfunc (const char *name)
5127 rtx symbol;
5129 /* Create a FUNCTION_DECL that can be passed to
5130 targetm.encode_section_info. */
5131 /* ??? We don't have any type information except that this is
5132 a function. Pretend this is "int foo()". */
5133 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5134 build_function_type (integer_type_node, NULL_TREE));
5135 DECL_ARTIFICIAL (decl) = 1;
5136 DECL_EXTERNAL (decl) = 1;
5137 TREE_PUBLIC (decl) = 1;
5139 symbol = XEXP (DECL_RTL (decl), 0);
5141 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5142 are the flags assigned by targetm.encode_section_info. */
5143 SET_SYMBOL_REF_DECL (symbol, 0);
5145 return symbol;
5148 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5149 MODE to NAME, which should be either 0 or a string constant. */
5150 void
5151 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5153 if (name)
5154 optable->handlers[mode].libfunc = init_one_libfunc (name);
5155 else
5156 optable->handlers[mode].libfunc = 0;
5159 /* Call this to reset the function entry for one conversion optab
5160 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5161 either 0 or a string constant. */
5162 void
5163 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5164 enum machine_mode fmode, const char *name)
5166 if (name)
5167 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5168 else
5169 optable->handlers[tmode][fmode].libfunc = 0;
5172 /* Call this once to initialize the contents of the optabs
5173 appropriately for the current target machine. */
5175 void
5176 init_optabs (void)
5178 unsigned int i;
5180 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5182 for (i = 0; i < NUM_RTX_CODE; i++)
5183 setcc_gen_code[i] = CODE_FOR_nothing;
5185 #ifdef HAVE_conditional_move
5186 for (i = 0; i < NUM_MACHINE_MODES; i++)
5187 movcc_gen_code[i] = CODE_FOR_nothing;
5188 #endif
5190 for (i = 0; i < NUM_MACHINE_MODES; i++)
5192 vcond_gen_code[i] = CODE_FOR_nothing;
5193 vcondu_gen_code[i] = CODE_FOR_nothing;
5196 add_optab = init_optab (PLUS);
5197 addv_optab = init_optabv (PLUS);
5198 sub_optab = init_optab (MINUS);
5199 subv_optab = init_optabv (MINUS);
5200 smul_optab = init_optab (MULT);
5201 smulv_optab = init_optabv (MULT);
5202 smul_highpart_optab = init_optab (UNKNOWN);
5203 umul_highpart_optab = init_optab (UNKNOWN);
5204 smul_widen_optab = init_optab (UNKNOWN);
5205 umul_widen_optab = init_optab (UNKNOWN);
5206 usmul_widen_optab = init_optab (UNKNOWN);
5207 sdiv_optab = init_optab (DIV);
5208 sdivv_optab = init_optabv (DIV);
5209 sdivmod_optab = init_optab (UNKNOWN);
5210 udiv_optab = init_optab (UDIV);
5211 udivmod_optab = init_optab (UNKNOWN);
5212 smod_optab = init_optab (MOD);
5213 umod_optab = init_optab (UMOD);
5214 fmod_optab = init_optab (UNKNOWN);
5215 drem_optab = init_optab (UNKNOWN);
5216 ftrunc_optab = init_optab (UNKNOWN);
5217 and_optab = init_optab (AND);
5218 ior_optab = init_optab (IOR);
5219 xor_optab = init_optab (XOR);
5220 ashl_optab = init_optab (ASHIFT);
5221 ashr_optab = init_optab (ASHIFTRT);
5222 lshr_optab = init_optab (LSHIFTRT);
5223 rotl_optab = init_optab (ROTATE);
5224 rotr_optab = init_optab (ROTATERT);
5225 smin_optab = init_optab (SMIN);
5226 smax_optab = init_optab (SMAX);
5227 umin_optab = init_optab (UMIN);
5228 umax_optab = init_optab (UMAX);
5229 pow_optab = init_optab (UNKNOWN);
5230 atan2_optab = init_optab (UNKNOWN);
5232 /* These three have codes assigned exclusively for the sake of
5233 have_insn_for. */
5234 mov_optab = init_optab (SET);
5235 movstrict_optab = init_optab (STRICT_LOW_PART);
5236 cmp_optab = init_optab (COMPARE);
5238 ucmp_optab = init_optab (UNKNOWN);
5239 tst_optab = init_optab (UNKNOWN);
5241 eq_optab = init_optab (EQ);
5242 ne_optab = init_optab (NE);
5243 gt_optab = init_optab (GT);
5244 ge_optab = init_optab (GE);
5245 lt_optab = init_optab (LT);
5246 le_optab = init_optab (LE);
5247 unord_optab = init_optab (UNORDERED);
5249 neg_optab = init_optab (NEG);
5250 negv_optab = init_optabv (NEG);
5251 abs_optab = init_optab (ABS);
5252 absv_optab = init_optabv (ABS);
5253 addcc_optab = init_optab (UNKNOWN);
5254 one_cmpl_optab = init_optab (NOT);
5255 ffs_optab = init_optab (FFS);
5256 clz_optab = init_optab (CLZ);
5257 ctz_optab = init_optab (CTZ);
5258 popcount_optab = init_optab (POPCOUNT);
5259 parity_optab = init_optab (PARITY);
5260 sqrt_optab = init_optab (SQRT);
5261 floor_optab = init_optab (UNKNOWN);
5262 lfloor_optab = init_optab (UNKNOWN);
5263 ceil_optab = init_optab (UNKNOWN);
5264 lceil_optab = init_optab (UNKNOWN);
5265 round_optab = init_optab (UNKNOWN);
5266 btrunc_optab = init_optab (UNKNOWN);
5267 nearbyint_optab = init_optab (UNKNOWN);
5268 rint_optab = init_optab (UNKNOWN);
5269 lrint_optab = init_optab (UNKNOWN);
5270 sincos_optab = init_optab (UNKNOWN);
5271 sin_optab = init_optab (UNKNOWN);
5272 asin_optab = init_optab (UNKNOWN);
5273 cos_optab = init_optab (UNKNOWN);
5274 acos_optab = init_optab (UNKNOWN);
5275 exp_optab = init_optab (UNKNOWN);
5276 exp10_optab = init_optab (UNKNOWN);
5277 exp2_optab = init_optab (UNKNOWN);
5278 expm1_optab = init_optab (UNKNOWN);
5279 ldexp_optab = init_optab (UNKNOWN);
5280 logb_optab = init_optab (UNKNOWN);
5281 ilogb_optab = init_optab (UNKNOWN);
5282 log_optab = init_optab (UNKNOWN);
5283 log10_optab = init_optab (UNKNOWN);
5284 log2_optab = init_optab (UNKNOWN);
5285 log1p_optab = init_optab (UNKNOWN);
5286 tan_optab = init_optab (UNKNOWN);
5287 atan_optab = init_optab (UNKNOWN);
5288 copysign_optab = init_optab (UNKNOWN);
5290 strlen_optab = init_optab (UNKNOWN);
5291 cbranch_optab = init_optab (UNKNOWN);
5292 cmov_optab = init_optab (UNKNOWN);
5293 cstore_optab = init_optab (UNKNOWN);
5294 push_optab = init_optab (UNKNOWN);
5296 reduc_smax_optab = init_optab (UNKNOWN);
5297 reduc_umax_optab = init_optab (UNKNOWN);
5298 reduc_smin_optab = init_optab (UNKNOWN);
5299 reduc_umin_optab = init_optab (UNKNOWN);
5300 reduc_splus_optab = init_optab (UNKNOWN);
5301 reduc_uplus_optab = init_optab (UNKNOWN);
5303 ssum_widen_optab = init_optab (UNKNOWN);
5304 usum_widen_optab = init_optab (UNKNOWN);
5305 sdot_prod_optab = init_optab (UNKNOWN);
5306 udot_prod_optab = init_optab (UNKNOWN);
5308 vec_extract_optab = init_optab (UNKNOWN);
5309 vec_set_optab = init_optab (UNKNOWN);
5310 vec_init_optab = init_optab (UNKNOWN);
5311 vec_shl_optab = init_optab (UNKNOWN);
5312 vec_shr_optab = init_optab (UNKNOWN);
5313 vec_realign_load_optab = init_optab (UNKNOWN);
5314 movmisalign_optab = init_optab (UNKNOWN);
5316 powi_optab = init_optab (UNKNOWN);
5318 /* Conversions. */
5319 sext_optab = init_convert_optab (SIGN_EXTEND);
5320 zext_optab = init_convert_optab (ZERO_EXTEND);
5321 trunc_optab = init_convert_optab (TRUNCATE);
5322 sfix_optab = init_convert_optab (FIX);
5323 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5324 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5325 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5326 sfloat_optab = init_convert_optab (FLOAT);
5327 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5329 for (i = 0; i < NUM_MACHINE_MODES; i++)
5331 movmem_optab[i] = CODE_FOR_nothing;
5332 cmpstr_optab[i] = CODE_FOR_nothing;
5333 cmpstrn_optab[i] = CODE_FOR_nothing;
5334 cmpmem_optab[i] = CODE_FOR_nothing;
5335 setmem_optab[i] = CODE_FOR_nothing;
5337 sync_add_optab[i] = CODE_FOR_nothing;
5338 sync_sub_optab[i] = CODE_FOR_nothing;
5339 sync_ior_optab[i] = CODE_FOR_nothing;
5340 sync_and_optab[i] = CODE_FOR_nothing;
5341 sync_xor_optab[i] = CODE_FOR_nothing;
5342 sync_nand_optab[i] = CODE_FOR_nothing;
5343 sync_old_add_optab[i] = CODE_FOR_nothing;
5344 sync_old_sub_optab[i] = CODE_FOR_nothing;
5345 sync_old_ior_optab[i] = CODE_FOR_nothing;
5346 sync_old_and_optab[i] = CODE_FOR_nothing;
5347 sync_old_xor_optab[i] = CODE_FOR_nothing;
5348 sync_old_nand_optab[i] = CODE_FOR_nothing;
5349 sync_new_add_optab[i] = CODE_FOR_nothing;
5350 sync_new_sub_optab[i] = CODE_FOR_nothing;
5351 sync_new_ior_optab[i] = CODE_FOR_nothing;
5352 sync_new_and_optab[i] = CODE_FOR_nothing;
5353 sync_new_xor_optab[i] = CODE_FOR_nothing;
5354 sync_new_nand_optab[i] = CODE_FOR_nothing;
5355 sync_compare_and_swap[i] = CODE_FOR_nothing;
5356 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5357 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5358 sync_lock_release[i] = CODE_FOR_nothing;
5360 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5363 /* Fill in the optabs with the insns we support. */
5364 init_all_optabs ();
5366 /* Initialize the optabs with the names of the library functions. */
5367 init_integral_libfuncs (add_optab, "add", '3');
5368 init_floating_libfuncs (add_optab, "add", '3');
5369 init_integral_libfuncs (addv_optab, "addv", '3');
5370 init_floating_libfuncs (addv_optab, "add", '3');
5371 init_integral_libfuncs (sub_optab, "sub", '3');
5372 init_floating_libfuncs (sub_optab, "sub", '3');
5373 init_integral_libfuncs (subv_optab, "subv", '3');
5374 init_floating_libfuncs (subv_optab, "sub", '3');
5375 init_integral_libfuncs (smul_optab, "mul", '3');
5376 init_floating_libfuncs (smul_optab, "mul", '3');
5377 init_integral_libfuncs (smulv_optab, "mulv", '3');
5378 init_floating_libfuncs (smulv_optab, "mul", '3');
5379 init_integral_libfuncs (sdiv_optab, "div", '3');
5380 init_floating_libfuncs (sdiv_optab, "div", '3');
5381 init_integral_libfuncs (sdivv_optab, "divv", '3');
5382 init_integral_libfuncs (udiv_optab, "udiv", '3');
5383 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5384 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5385 init_integral_libfuncs (smod_optab, "mod", '3');
5386 init_integral_libfuncs (umod_optab, "umod", '3');
5387 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5388 init_integral_libfuncs (and_optab, "and", '3');
5389 init_integral_libfuncs (ior_optab, "ior", '3');
5390 init_integral_libfuncs (xor_optab, "xor", '3');
5391 init_integral_libfuncs (ashl_optab, "ashl", '3');
5392 init_integral_libfuncs (ashr_optab, "ashr", '3');
5393 init_integral_libfuncs (lshr_optab, "lshr", '3');
5394 init_integral_libfuncs (smin_optab, "min", '3');
5395 init_floating_libfuncs (smin_optab, "min", '3');
5396 init_integral_libfuncs (smax_optab, "max", '3');
5397 init_floating_libfuncs (smax_optab, "max", '3');
5398 init_integral_libfuncs (umin_optab, "umin", '3');
5399 init_integral_libfuncs (umax_optab, "umax", '3');
5400 init_integral_libfuncs (neg_optab, "neg", '2');
5401 init_floating_libfuncs (neg_optab, "neg", '2');
5402 init_integral_libfuncs (negv_optab, "negv", '2');
5403 init_floating_libfuncs (negv_optab, "neg", '2');
5404 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5405 init_integral_libfuncs (ffs_optab, "ffs", '2');
5406 init_integral_libfuncs (clz_optab, "clz", '2');
5407 init_integral_libfuncs (ctz_optab, "ctz", '2');
5408 init_integral_libfuncs (popcount_optab, "popcount", '2');
5409 init_integral_libfuncs (parity_optab, "parity", '2');
5411 /* Comparison libcalls for integers MUST come in pairs,
5412 signed/unsigned. */
5413 init_integral_libfuncs (cmp_optab, "cmp", '2');
5414 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5415 init_floating_libfuncs (cmp_optab, "cmp", '2');
5417 /* EQ etc are floating point only. */
5418 init_floating_libfuncs (eq_optab, "eq", '2');
5419 init_floating_libfuncs (ne_optab, "ne", '2');
5420 init_floating_libfuncs (gt_optab, "gt", '2');
5421 init_floating_libfuncs (ge_optab, "ge", '2');
5422 init_floating_libfuncs (lt_optab, "lt", '2');
5423 init_floating_libfuncs (le_optab, "le", '2');
5424 init_floating_libfuncs (unord_optab, "unord", '2');
5426 init_floating_libfuncs (powi_optab, "powi", '2');
5428 /* Conversions. */
5429 init_interclass_conv_libfuncs (sfloat_optab, "float",
5430 MODE_INT, MODE_FLOAT);
5431 init_interclass_conv_libfuncs (sfloat_optab, "float",
5432 MODE_INT, MODE_DECIMAL_FLOAT);
5433 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5434 MODE_INT, MODE_FLOAT);
5435 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5436 MODE_INT, MODE_DECIMAL_FLOAT);
5437 init_interclass_conv_libfuncs (sfix_optab, "fix",
5438 MODE_FLOAT, MODE_INT);
5439 init_interclass_conv_libfuncs (sfix_optab, "fix",
5440 MODE_DECIMAL_FLOAT, MODE_INT);
5441 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5442 MODE_FLOAT, MODE_INT);
5443 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5444 MODE_DECIMAL_FLOAT, MODE_INT);
5445 init_interclass_conv_libfuncs (ufloat_optab, "floatuns",
5446 MODE_INT, MODE_DECIMAL_FLOAT);
5448 /* sext_optab is also used for FLOAT_EXTEND. */
5449 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5450 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true);
5451 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5452 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5453 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5454 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false);
5455 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5456 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5458 /* Use cabs for double complex abs, since systems generally have cabs.
5459 Don't define any libcall for float complex, so that cabs will be used. */
5460 if (complex_double_type_node)
5461 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5462 = init_one_libfunc ("cabs");
5464 /* The ffs function operates on `int'. */
5465 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5466 = init_one_libfunc ("ffs");
5468 abort_libfunc = init_one_libfunc ("abort");
5469 memcpy_libfunc = init_one_libfunc ("memcpy");
5470 memmove_libfunc = init_one_libfunc ("memmove");
5471 memcmp_libfunc = init_one_libfunc ("memcmp");
5472 memset_libfunc = init_one_libfunc ("memset");
5473 setbits_libfunc = init_one_libfunc ("__setbits");
5475 #ifndef DONT_USE_BUILTIN_SETJMP
5476 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5477 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5478 #else
5479 setjmp_libfunc = init_one_libfunc ("setjmp");
5480 longjmp_libfunc = init_one_libfunc ("longjmp");
5481 #endif
5482 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5483 unwind_sjlj_unregister_libfunc
5484 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5486 /* For function entry/exit instrumentation. */
5487 profile_function_entry_libfunc
5488 = init_one_libfunc ("__cyg_profile_func_enter");
5489 profile_function_exit_libfunc
5490 = init_one_libfunc ("__cyg_profile_func_exit");
5492 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5494 if (HAVE_conditional_trap)
5495 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5497 /* Allow the target to add more libcalls or rename some, etc. */
5498 targetm.init_libfuncs ();
5501 #ifdef DEBUG
5503 /* Print information about the current contents of the optabs on
5504 STDERR. */
5506 static void
5507 debug_optab_libfuncs (void)
5509 int i;
5510 int j;
5511 int k;
5513 /* Dump the arithmetic optabs. */
5514 for (i = 0; i != (int) OTI_MAX; i++)
5515 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5517 optab o;
5518 struct optab_handlers *h;
5520 o = optab_table[i];
5521 h = &o->handlers[j];
5522 if (h->libfunc)
5524 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5525 fprintf (stderr, "%s\t%s:\t%s\n",
5526 GET_RTX_NAME (o->code),
5527 GET_MODE_NAME (j),
5528 XSTR (h->libfunc, 0));
5532 /* Dump the conversion optabs. */
5533 for (i = 0; i < (int) COI_MAX; ++i)
5534 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5535 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5537 convert_optab o;
5538 struct optab_handlers *h;
5540 o = &convert_optab_table[i];
5541 h = &o->handlers[j][k];
5542 if (h->libfunc)
5544 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5545 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5546 GET_RTX_NAME (o->code),
5547 GET_MODE_NAME (j),
5548 GET_MODE_NAME (k),
5549 XSTR (h->libfunc, 0));
5554 #endif /* DEBUG */
5557 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5558 CODE. Return 0 on failure. */
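/* Added illustrative sketch (assumes a typical caller such as the
   if-conversion pass; not part of the original file):

       rtx seq = gen_cond_trap (EQ, op0, op1, GEN_INT (0));
       if (seq)
         emit_insn (seq);

   OP0/OP1 are the comparison operands and the last argument is the trap
   code rtx.  */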
5560 rtx
5561 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5562 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5564 enum machine_mode mode = GET_MODE (op1);
5565 enum insn_code icode;
5566 rtx insn;
5568 if (!HAVE_conditional_trap)
5569 return 0;
5571 if (mode == VOIDmode)
5572 return 0;
5574 icode = cmp_optab->handlers[(int) mode].insn_code;
5575 if (icode == CODE_FOR_nothing)
5576 return 0;
5578 start_sequence ();
5579 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5580 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5581 if (!op1 || !op2)
5583 end_sequence ();
5584 return 0;
5586 emit_insn (GEN_FCN (icode) (op1, op2));
5588 PUT_CODE (trap_rtx, code);
5589 gcc_assert (HAVE_conditional_trap);
5590 insn = gen_conditional_trap (trap_rtx, tcode);
5591 if (insn)
5593 emit_insn (insn);
5594 insn = get_insns ();
5596 end_sequence ();
5598 return insn;
5601 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5602 or unsigned operation code. */
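/* Added example: get_rtx_code (LT_EXPR, true) returns LTU while
   get_rtx_code (LT_EXPR, false) returns LT; the unordered comparisons
   such as UNLT_EXPR map to their rtx counterparts regardless of
   UNSIGNEDP.  */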
5604 static enum rtx_code
5605 get_rtx_code (enum tree_code tcode, bool unsignedp)
5607 enum rtx_code code;
5608 switch (tcode)
5610 case EQ_EXPR:
5611 code = EQ;
5612 break;
5613 case NE_EXPR:
5614 code = NE;
5615 break;
5616 case LT_EXPR:
5617 code = unsignedp ? LTU : LT;
5618 break;
5619 case LE_EXPR:
5620 code = unsignedp ? LEU : LE;
5621 break;
5622 case GT_EXPR:
5623 code = unsignedp ? GTU : GT;
5624 break;
5625 case GE_EXPR:
5626 code = unsignedp ? GEU : GE;
5627 break;
5629 case UNORDERED_EXPR:
5630 code = UNORDERED;
5631 break;
5632 case ORDERED_EXPR:
5633 code = ORDERED;
5634 break;
5635 case UNLT_EXPR:
5636 code = UNLT;
5637 break;
5638 case UNLE_EXPR:
5639 code = UNLE;
5640 break;
5641 case UNGT_EXPR:
5642 code = UNGT;
5643 break;
5644 case UNGE_EXPR:
5645 code = UNGE;
5646 break;
5647 case UNEQ_EXPR:
5648 code = UNEQ;
5649 break;
5650 case LTGT_EXPR:
5651 code = LTGT;
5652 break;
5654 default:
5655 gcc_unreachable ();
5657 return code;
5660 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5661 unsigned operators. Do not generate a compare instruction. */
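/* Added note: for a condition such as a < b on unsigned operands this
   returns the bare comparison rtx (ltu a b); the caller passes it,
   together with its two operands, to a vcond pattern instead of emitting
   a compare instruction here.  */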
5663 static rtx
5664 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5666 enum rtx_code rcode;
5667 tree t_op0, t_op1;
5668 rtx rtx_op0, rtx_op1;
5670 /* This is unlikely. While generating VEC_COND_EXPR, the auto-vectorizer
5671 ensures that the condition is a relational operation. */
5672 gcc_assert (COMPARISON_CLASS_P (cond));
5674 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5675 t_op0 = TREE_OPERAND (cond, 0);
5676 t_op1 = TREE_OPERAND (cond, 1);
5678 /* Expand operands. */
5679 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5680 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
5682 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5683 && GET_MODE (rtx_op0) != VOIDmode)
5684 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5686 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5687 && GET_MODE (rtx_op1) != VOIDmode)
5688 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5690 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5693 /* Return insn code for VEC_COND_EXPR EXPR. */
5695 static inline enum insn_code
5696 get_vcond_icode (tree expr, enum machine_mode mode)
5698 enum insn_code icode = CODE_FOR_nothing;
5700 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5701 icode = vcondu_gen_code[mode];
5702 else
5703 icode = vcond_gen_code[mode];
5704 return icode;
5707 /* Return TRUE iff appropriate vector insns are available
5708 for the vector cond expr EXPR in mode VMODE. */
5710 bool
5711 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5713 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5714 return false;
5715 return true;
5718 /* Generate insns for VEC_COND_EXPR. */
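/* Added note: the vcond/vcondu patterns used here take six operands, in
   the order of the GEN_FCN call at the end of this function:

       (target, true-value, false-value, comparison, cc_op0, cc_op1)

   so operands 4 and 5 are the raw comparison operands whose predicates
   vector_compare_rtx checks above.  */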
5720 rtx
5721 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5723 enum insn_code icode;
5724 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5725 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5726 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5728 icode = get_vcond_icode (vec_cond_expr, mode);
5729 if (icode == CODE_FOR_nothing)
5730 return 0;
5732 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5733 target = gen_reg_rtx (mode);
5735 /* Get comparison rtx. First expand both cond expr operands. */
5736 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5737 unsignedp, icode);
5738 cc_op0 = XEXP (comparison, 0);
5739 cc_op1 = XEXP (comparison, 1);
5740 /* Expand both operands and force them into registers, if required. */
5741 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5742 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5743 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5744 && mode != VOIDmode)
5745 rtx_op1 = force_reg (mode, rtx_op1);
5747 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5748 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5749 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5750 && mode != VOIDmode)
5751 rtx_op2 = force_reg (mode, rtx_op2);
5753 /* Emit instruction! */
5754 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5755 comparison, cc_op0, cc_op1));
5757 return target;
5761 /* This is an internal subroutine of the other compare_and_swap expanders.
5762 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5763 operation. TARGET is an optional place to store the value result of
5764 the operation. ICODE is the particular instruction to expand. Return
5765 the result of the operation. */
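/* Added note: the sync_compare_and_swap patterns invoked through ICODE
   take (target, mem, old_val, new_val); OLD_VAL and NEW_VAL are converted
   to MEM's mode and forced into registers below when the pattern's
   predicates require it.  */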
5767 static rtx
5768 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5769 rtx target, enum insn_code icode)
5771 enum machine_mode mode = GET_MODE (mem);
5772 rtx insn;
5774 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5775 target = gen_reg_rtx (mode);
5777 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5778 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5779 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5780 old_val = force_reg (mode, old_val);
5782 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5783 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5784 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5785 new_val = force_reg (mode, new_val);
5787 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5788 if (insn == NULL_RTX)
5789 return NULL_RTX;
5790 emit_insn (insn);
5792 return target;
5795 /* Expand a compare-and-swap operation and return its value. */
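/* Added usage note: this is the expander behind builtins of the
   __sync_val_compare_and_swap family, e.g.

       long old = __sync_val_compare_and_swap (&x, expected, desired);

   assuming the target provides a sync_compare_and_swap pattern for the
   mode of x; otherwise NULL_RTX is returned and the builtin falls back
   to an external call.  */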
5797 rtx
5798 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5800 enum machine_mode mode = GET_MODE (mem);
5801 enum insn_code icode = sync_compare_and_swap[mode];
5803 if (icode == CODE_FOR_nothing)
5804 return NULL_RTX;
5806 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
5809 /* Expand a compare-and-swap operation and store true into the result if
5810 the operation was successful and false otherwise. Return the result.
5811 Unlike other routines, TARGET is not optional. */
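/* Added usage note: this is the expander used for
   __sync_bool_compare_and_swap, which only needs the success flag, e.g.

       if (__sync_bool_compare_and_swap (&x, expected, desired))
         ... update succeeded ...

   A sync_compare_and_swap_cc pattern, when available, supplies the flag
   directly; otherwise the value-returning CAS followed by a comparison
   is used, as the comment below explains.  */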
5813 rtx
5814 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5816 enum machine_mode mode = GET_MODE (mem);
5817 enum insn_code icode;
5818 rtx subtarget, label0, label1;
5820 /* If the target supports a compare-and-swap pattern that simultaneously
5821 sets some flag for success, then use it. Otherwise use the regular
5822 compare-and-swap and follow that immediately with a compare insn. */
5823 icode = sync_compare_and_swap_cc[mode];
5824 switch (icode)
5826 default:
5827 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5828 NULL_RTX, icode);
5829 if (subtarget != NULL_RTX)
5830 break;
5832 /* FALLTHRU */
5833 case CODE_FOR_nothing:
5834 icode = sync_compare_and_swap[mode];
5835 if (icode == CODE_FOR_nothing)
5836 return NULL_RTX;
5838 /* Ensure that if old_val == mem, we're not comparing
5839 against an old value. */
5840 if (MEM_P (old_val))
5841 old_val = force_reg (mode, old_val);
5843 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5844 NULL_RTX, icode);
5845 if (subtarget == NULL_RTX)
5846 return NULL_RTX;
5848 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5851 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5852 setcc instruction from the beginning. We don't work too hard here,
5853 but it's nice to not be stupid about initial code gen either. */
5854 if (STORE_FLAG_VALUE == 1)
5856 icode = setcc_gen_code[EQ];
5857 if (icode != CODE_FOR_nothing)
5859 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5860 rtx insn;
5862 subtarget = target;
5863 if (!insn_data[icode].operand[0].predicate (target, cmode))
5864 subtarget = gen_reg_rtx (cmode);
5866 insn = GEN_FCN (icode) (subtarget);
5867 if (insn)
5869 emit_insn (insn);
5870 if (GET_MODE (target) != GET_MODE (subtarget))
5872 convert_move (target, subtarget, 1);
5873 subtarget = target;
5875 return subtarget;
5880 /* Without an appropriate setcc instruction, use a set of branches to
5881 get 1 and 0 stored into target. Presumably if the target has a
5882 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5884 label0 = gen_label_rtx ();
5885 label1 = gen_label_rtx ();
5887 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
5888 emit_move_insn (target, const0_rtx);
5889 emit_jump_insn (gen_jump (label1));
5890 emit_barrier ();
5891 emit_label (label0);
5892 emit_move_insn (target, const1_rtx);
5893 emit_label (label1);
5895 return target;
5898 /* This is a helper function for the other atomic operations. This function
5899 emits a loop that contains SEQ that iterates until a compare-and-swap
5900 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5901 a set of instructions that takes a value from OLD_REG as an input and
5902 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5903 set to the current contents of MEM. After SEQ, a compare-and-swap will
5904 attempt to update MEM with NEW_REG. The function returns true when the
5905 loop was generated successfully. */
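/* Added sketch (mirrors the later callers in this file, e.g.
   expand_sync_operation; not part of the original source): SEQ is built
   as a detached sequence that maps OLD_REG to NEW_REG, roughly

       rtx old_reg = gen_reg_rtx (mode), new_reg, seq;
       start_sequence ();
       new_reg = expand_simple_binop (mode, PLUS, old_reg, val, NULL_RTX,
                                      true, OPTAB_LIB_WIDEN);
       seq = get_insns ();
       end_sequence ();
       expand_compare_and_swap_loop (mem, old_reg, new_reg, seq);  */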
5907 static bool
5908 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5910 enum machine_mode mode = GET_MODE (mem);
5911 enum insn_code icode;
5912 rtx label, cmp_reg, subtarget;
5914 /* The loop we want to generate looks like
5916 cmp_reg = mem;
5917 label:
5918 old_reg = cmp_reg;
5919 seq;
5920 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
5921 if (cmp_reg != old_reg)
5922 goto label;
5924 Note that we only do the plain load from memory once. Subsequent
5925 iterations use the value loaded by the compare-and-swap pattern. */
5927 label = gen_label_rtx ();
5928 cmp_reg = gen_reg_rtx (mode);
5930 emit_move_insn (cmp_reg, mem);
5931 emit_label (label);
5932 emit_move_insn (old_reg, cmp_reg);
5933 if (seq)
5934 emit_insn (seq);
5936 /* If the target supports a compare-and-swap pattern that simultaneously
5937 sets some flag for success, then use it. Otherwise use the regular
5938 compare-and-swap and follow that immediately with a compare insn. */
5939 icode = sync_compare_and_swap_cc[mode];
5940 switch (icode)
5942 default:
5943 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5944 cmp_reg, icode);
5945 if (subtarget != NULL_RTX)
5947 gcc_assert (subtarget == cmp_reg);
5948 break;
5951 /* FALLTHRU */
5952 case CODE_FOR_nothing:
5953 icode = sync_compare_and_swap[mode];
5954 if (icode == CODE_FOR_nothing)
5955 return false;
5957 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5958 cmp_reg, icode);
5959 if (subtarget == NULL_RTX)
5960 return false;
5961 if (subtarget != cmp_reg)
5962 emit_move_insn (cmp_reg, subtarget);
5964 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
5967 /* ??? Mark this jump predicted not taken? */
5968 emit_jump_insn (bcc_gen_fctn[NE] (label));
5970 return true;
5973 /* This function generates the atomic operation MEM CODE= VAL. In this
5974 case, we do not care about any resulting value. Returns NULL if we
5975 cannot generate the operation. */
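/* Added usage note: this handles the __sync_* builtins when their result
   is ignored, e.g.

       __sync_fetch_and_add (&counter, 1);    -- value not used

   Note how, below, a missing sync_sub pattern is handled by negating VAL
   and retrying as an addition before falling back to the
   compare-and-swap loop.  */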
5977 rtx
5978 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
5980 enum machine_mode mode = GET_MODE (mem);
5981 enum insn_code icode;
5982 rtx insn;
5984 /* Look to see if the target supports the operation directly. */
5985 switch (code)
5987 case PLUS:
5988 icode = sync_add_optab[mode];
5989 break;
5990 case IOR:
5991 icode = sync_ior_optab[mode];
5992 break;
5993 case XOR:
5994 icode = sync_xor_optab[mode];
5995 break;
5996 case AND:
5997 icode = sync_and_optab[mode];
5998 break;
5999 case NOT:
6000 icode = sync_nand_optab[mode];
6001 break;
6003 case MINUS:
6004 icode = sync_sub_optab[mode];
6005 if (icode == CODE_FOR_nothing)
6007 icode = sync_add_optab[mode];
6008 if (icode != CODE_FOR_nothing)
6010 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6011 code = PLUS;
6014 break;
6016 default:
6017 gcc_unreachable ();
6020 /* Generate the direct operation, if present. */
6021 if (icode != CODE_FOR_nothing)
6023 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6024 val = convert_modes (mode, GET_MODE (val), val, 1);
6025 if (!insn_data[icode].operand[1].predicate (val, mode))
6026 val = force_reg (mode, val);
6028 insn = GEN_FCN (icode) (mem, val);
6029 if (insn)
6031 emit_insn (insn);
6032 return const0_rtx;
6036 /* Failing that, generate a compare-and-swap loop in which we perform the
6037 operation with normal arithmetic instructions. */
6038 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6040 rtx t0 = gen_reg_rtx (mode), t1;
6042 start_sequence ();
6044 t1 = t0;
6045 if (code == NOT)
6047 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6048 code = AND;
6050 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6051 true, OPTAB_LIB_WIDEN);
6053 insn = get_insns ();
6054 end_sequence ();
6056 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6057 return const0_rtx;
6060 return NULL_RTX;
6063 /* This function generates the atomic operation MEM CODE= VAL. In this
6064 case, we do care about the resulting value: if AFTER is true then
6065 return the value MEM holds after the operation, if AFTER is false
6066 then return the value MEM holds before the operation. TARGET is an
6067 optional place for the result value to be stored. */
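/* Added usage note: AFTER selects between the two builtin flavors, e.g.

       old_value = __sync_fetch_and_add (&x, n);    -- AFTER is false
       new_value = __sync_add_and_fetch (&x, n);    -- AFTER is true

   assuming the usual mapping performed by the builtin expanders.  */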
6069 rtx
6070 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
6071 bool after, rtx target)
6073 enum machine_mode mode = GET_MODE (mem);
6074 enum insn_code old_code, new_code, icode;
6075 bool compensate;
6076 rtx insn;
6078 /* Look to see if the target supports the operation directly. */
6079 switch (code)
6081 case PLUS:
6082 old_code = sync_old_add_optab[mode];
6083 new_code = sync_new_add_optab[mode];
6084 break;
6085 case IOR:
6086 old_code = sync_old_ior_optab[mode];
6087 new_code = sync_new_ior_optab[mode];
6088 break;
6089 case XOR:
6090 old_code = sync_old_xor_optab[mode];
6091 new_code = sync_new_xor_optab[mode];
6092 break;
6093 case AND:
6094 old_code = sync_old_and_optab[mode];
6095 new_code = sync_new_and_optab[mode];
6096 break;
6097 case NOT:
6098 old_code = sync_old_nand_optab[mode];
6099 new_code = sync_new_nand_optab[mode];
6100 break;
6102 case MINUS:
6103 old_code = sync_old_sub_optab[mode];
6104 new_code = sync_new_sub_optab[mode];
6105 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
6107 old_code = sync_old_add_optab[mode];
6108 new_code = sync_new_add_optab[mode];
6109 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
6111 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6112 code = PLUS;
6115 break;
6117 default:
6118 gcc_unreachable ();
6121 /* If the target supports the proper new/old operation, great. But if
6122 we only support the opposite old/new operation, check to see if we
6123 can compensate. When the old value is supported, we can always
6124 perform the operation again with normal arithmetic. When only the
6125 new value is supported, we can compensate only if the operation is
6126 reversible. */
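/* Added worked example: suppose only sync_old_add exists but the caller
   wants the value after the addition. The pattern leaves the old value
   in TARGET, and the code below then re-applies the operation in
   ordinary RTL:

       target = expand_simple_binop (mode, PLUS, target, val, NULL_RTX,
                                     true, OPTAB_LIB_WIDEN);

   Conversely, when only the new value is available and the old one is
   wanted, PLUS and MINUS are swapped first, which is why only reversible
   operations can be compensated in that direction.  */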
6127 compensate = false;
6128 if (after)
6130 icode = new_code;
6131 if (icode == CODE_FOR_nothing)
6133 icode = old_code;
6134 if (icode != CODE_FOR_nothing)
6135 compensate = true;
6138 else
6140 icode = old_code;
6141 if (icode == CODE_FOR_nothing
6142 && (code == PLUS || code == MINUS || code == XOR))
6144 icode = new_code;
6145 if (icode != CODE_FOR_nothing)
6146 compensate = true;
6150 /* If we found something supported, great. */
6151 if (icode != CODE_FOR_nothing)
6153 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6154 target = gen_reg_rtx (mode);
6156 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6157 val = convert_modes (mode, GET_MODE (val), val, 1);
6158 if (!insn_data[icode].operand[2].predicate (val, mode))
6159 val = force_reg (mode, val);
6161 insn = GEN_FCN (icode) (target, mem, val);
6162 if (insn)
6164 emit_insn (insn);
6166 /* If we need to compensate for using an operation with the
6167 wrong return value, do so now. */
6168 if (compensate)
6170 if (!after)
6172 if (code == PLUS)
6173 code = MINUS;
6174 else if (code == MINUS)
6175 code = PLUS;
6178 if (code == NOT)
6179 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
6180 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
6181 true, OPTAB_LIB_WIDEN);
6184 return target;
6188 /* Failing that, generate a compare-and-swap loop in which we perform the
6189 operation with normal arithmetic instructions. */
6190 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6192 rtx t0 = gen_reg_rtx (mode), t1;
6194 if (!target || !register_operand (target, mode))
6195 target = gen_reg_rtx (mode);
6197 start_sequence ();
6199 if (!after)
6200 emit_move_insn (target, t0);
6201 t1 = t0;
6202 if (code == NOT)
6204 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6205 code = AND;
6207 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6208 true, OPTAB_LIB_WIDEN);
6209 if (after)
6210 emit_move_insn (target, t1);
6212 insn = get_insns ();
6213 end_sequence ();
6215 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6216 return target;
6219 return NULL_RTX;
6222 /* This function expands a test-and-set operation. Ideally we atomically
6223 store VAL in MEM and return the previous value in MEM. Some targets
6224 may not support this general form and only support VAL being the
6225 constant 1; in that case the return value is still 0/1, but the exact
6226 value stored in MEM is target defined. TARGET is an optional place
6227 to stick the return value. */
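/* Added usage note: this expands __sync_lock_test_and_set, commonly used
   as a spin-lock acquire, e.g.

       while (__sync_lock_test_and_set (&lock, 1))
         ;    -- spin until the previous value was 0

   On targets without the dedicated pattern, the compare-and-swap loop
   below performs a full exchange instead.  */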
6229 rtx
6230 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6232 enum machine_mode mode = GET_MODE (mem);
6233 enum insn_code icode;
6234 rtx insn;
6236 /* If the target supports the test-and-set directly, great. */
6237 icode = sync_lock_test_and_set[mode];
6238 if (icode != CODE_FOR_nothing)
6240 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6241 target = gen_reg_rtx (mode);
6243 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6244 val = convert_modes (mode, GET_MODE (val), val, 1);
6245 if (!insn_data[icode].operand[2].predicate (val, mode))
6246 val = force_reg (mode, val);
6248 insn = GEN_FCN (icode) (target, mem, val);
6249 if (insn)
6251 emit_insn (insn);
6252 return target;
6256 /* Otherwise, use a compare-and-swap loop for the exchange. */
6257 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6259 if (!target || !register_operand (target, mode))
6260 target = gen_reg_rtx (mode);
6261 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6262 val = convert_modes (mode, GET_MODE (val), val, 1);
6263 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6264 return target;
6267 return NULL_RTX;
6270 #include "gt-optabs.h"