[official-gcc.git] / gcc / optabs.c
1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "toplev.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
32 #include "rtl.h"
33 #include "tree.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "function.h"
37 #include "except.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "libfuncs.h"
41 #include "recog.h"
42 #include "reload.h"
43 #include "ggc.h"
44 #include "real.h"
45 #include "basic-block.h"
46 #include "target.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
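/* Each per-mode entry of an optab records the insn code of a matching
   named pattern (CODE_FOR_nothing if the target has none) and the
   libcall to fall back on; expand_binop and friends consult those
   fields in that order.  */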
57 optab optab_table[OTI_MAX];
59 rtx libfunc_table[LTI_MAX];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[COI_MAX];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
67 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (e.g. for the ARM). */
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
85 #endif
87 /* Indexed by the machine mode, gives the insn code for vector conditional
88 operation. */
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
100 int);
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
105 int *);
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static enum rtx_code get_rtx_code (enum tree_code, bool);
126 static rtx vector_compare_rtx (tree, bool, enum insn_code);
128 #ifndef HAVE_conditional_trap
129 #define HAVE_conditional_trap 0
130 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
131 #endif
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
135 operation).
137 If the last insn does not set TARGET, don't do anything, but return 1.
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
143 static int
144 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
146 rtx last_insn, insn, set;
147 rtx note;
149 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
151 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
152 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
153 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
154 && GET_RTX_CLASS (code) != RTX_COMPARE
155 && GET_RTX_CLASS (code) != RTX_UNARY)
156 return 1;
158 if (GET_CODE (target) == ZERO_EXTRACT)
159 return 1;
161 for (last_insn = insns;
162 NEXT_INSN (last_insn) != NULL_RTX;
163 last_insn = NEXT_INSN (last_insn))
166 set = single_set (last_insn);
167 if (set == NULL_RTX)
168 return 1;
170 if (! rtx_equal_p (SET_DEST (set), target)
171 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
172 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
173 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
174 return 1;
176 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
177 besides the last insn. */
178 if (reg_overlap_mentioned_p (target, op0)
179 || (op1 && reg_overlap_mentioned_p (target, op1)))
181 insn = PREV_INSN (last_insn);
182 while (insn != NULL_RTX)
184 if (reg_set_p (target, insn))
185 return 0;
187 insn = PREV_INSN (insn);
191 if (GET_RTX_CLASS (code) == RTX_UNARY)
192 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
193 else
194 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
196 set_unique_reg_note (last_insn, REG_EQUAL, note);
198 return 1;
201 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
202 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
203 not actually do a sign-extend or zero-extend, but can leave the
204 higher-order bits of the result rtx undefined, for example, in the case
205 of logical operations, but not right shifts. */
207 static rtx
208 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
209 int unsignedp, int no_extend)
211 rtx result;
213 /* If we don't have to extend and this is a constant, return it. */
214 if (no_extend && GET_MODE (op) == VOIDmode)
215 return op;
217 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
218 extend since it will be more efficient to do so unless the signedness of
219 a promoted object differs from our extension. */
220 if (! no_extend
221 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
222 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
223 return convert_modes (mode, oldmode, op, unsignedp);
225 /* If MODE is no wider than a single word, we return a paradoxical
226 SUBREG. */
227 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
228 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
230 /* Otherwise, get an object of MODE, clobber it, and set the low-order
231 part to OP. */
233 result = gen_reg_rtx (mode);
234 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
235 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
236 return result;
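/* Illustrative sketch, not part of the original file: why NO_EXTEND is
   safe for logical operations but not for right shifts.  Assume an 8-bit
   value living in a 32-bit register whose upper 24 bits are garbage (the
   "undefined higher-order bits" mentioned above); the widths and the
   function names here are illustrative assumptions.  */

static unsigned char
no_extend_and_example (unsigned int a_wide, unsigned int b_wide)
{
  /* Only the low byte of the result is used, and each of its bits
     depends only on the low bytes of the inputs, so the garbage in the
     upper bits never matters.  */
  return (unsigned char) (a_wide & b_wide);
}

static unsigned char
no_extend_shift_example (unsigned int a_wide)
{
  /* A right shift pulls higher-order bits down into the low byte, so
     the operand really must be zero- or sign-extended first.  */
  return (unsigned char) (((unsigned int) (unsigned char) a_wide) >> 2);
}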
239 /* Return the optab used for computing the operation given by
240 the tree code, CODE. This function is not always usable (for
241 example, it cannot give complete results for multiplication
242 or division) but probably ought to be relied on more widely
243 throughout the expander. */
244 optab
245 optab_for_tree_code (enum tree_code code, tree type)
247 bool trapv;
248 switch (code)
250 case BIT_AND_EXPR:
251 return and_optab;
253 case BIT_IOR_EXPR:
254 return ior_optab;
256 case BIT_NOT_EXPR:
257 return one_cmpl_optab;
259 case BIT_XOR_EXPR:
260 return xor_optab;
262 case TRUNC_MOD_EXPR:
263 case CEIL_MOD_EXPR:
264 case FLOOR_MOD_EXPR:
265 case ROUND_MOD_EXPR:
266 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
268 case RDIV_EXPR:
269 case TRUNC_DIV_EXPR:
270 case CEIL_DIV_EXPR:
271 case FLOOR_DIV_EXPR:
272 case ROUND_DIV_EXPR:
273 case EXACT_DIV_EXPR:
274 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
276 case LSHIFT_EXPR:
277 return ashl_optab;
279 case RSHIFT_EXPR:
280 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
282 case LROTATE_EXPR:
283 return rotl_optab;
285 case RROTATE_EXPR:
286 return rotr_optab;
288 case MAX_EXPR:
289 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
291 case MIN_EXPR:
292 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
294 case REALIGN_LOAD_EXPR:
295 return vec_realign_load_optab;
297 case WIDEN_SUM_EXPR:
298 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
300 case DOT_PROD_EXPR:
301 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
303 case REDUC_MAX_EXPR:
304 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
306 case REDUC_MIN_EXPR:
307 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
309 case REDUC_PLUS_EXPR:
310 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
312 case VEC_LSHIFT_EXPR:
313 return vec_shl_optab;
315 case VEC_RSHIFT_EXPR:
316 return vec_shr_optab;
318 default:
319 break;
322 trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
323 switch (code)
325 case PLUS_EXPR:
326 return trapv ? addv_optab : add_optab;
328 case MINUS_EXPR:
329 return trapv ? subv_optab : sub_optab;
331 case MULT_EXPR:
332 return trapv ? smulv_optab : smul_optab;
334 case NEGATE_EXPR:
335 return trapv ? negv_optab : neg_optab;
337 case ABS_EXPR:
338 return trapv ? absv_optab : abs_optab;
340 default:
341 return NULL;
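/* Illustrative sketch, not part of the original file: a typical caller
   (the vectorizer, for instance) combines optab_for_tree_code with the
   handlers[] table to ask whether the target can open-code a tree
   operation in the mode of TYPE.  The function name is hypothetical.  */

static bool
example_target_supports_op_p (enum tree_code code, tree type)
{
  optab op = optab_for_tree_code (code, type);

  return (op != NULL
          && op->handlers[(int) TYPE_MODE (type)].insn_code
             != CODE_FOR_nothing);
}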
346 /* Expand vector widening operations.
348 There are two different classes of operations handled here:
349 1) Operations whose result is wider than all the arguments to the operation.
350 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
351 In this case OP0 and optionally OP1 would be initialized,
352 but WIDE_OP wouldn't (not relevant for this case).
353 2) Operations whose result is of the same size as the last argument to the
354 operation, but wider than all the other arguments to the operation.
355 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
356 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
358 E.g., when called to expand the following operations, this is how
359 the arguments will be initialized:
360                                  nops  OP0     OP1     WIDE_OP
361 widening-sum                     2     oprnd0  -       oprnd1
362 widening-dot-product             3     oprnd0  oprnd1  oprnd2
363 widening-mult                    2     oprnd0  oprnd1  -
364 type-promotion (vec-unpack)      1     oprnd0  -       -        */
366 rtx
367 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
368 int unsignedp)
370 tree oprnd0, oprnd1, oprnd2;
371 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
372 optab widen_pattern_optab;
373 int icode;
374 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
375 rtx temp;
376 rtx pat;
377 rtx xop0, xop1, wxop;
378 int nops = TREE_CODE_LENGTH (TREE_CODE (exp));
380 oprnd0 = TREE_OPERAND (exp, 0);
381 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
382 widen_pattern_optab =
383 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
384 icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
385 gcc_assert (icode != CODE_FOR_nothing);
386 xmode0 = insn_data[icode].operand[1].mode;
388 if (nops >= 2)
390 oprnd1 = TREE_OPERAND (exp, 1);
391 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
392 xmode1 = insn_data[icode].operand[2].mode;
395 /* The last operand is of a wider mode than the rest of the operands. */
396 if (nops == 2)
398 wmode = tmode1;
399 wxmode = xmode1;
401 else if (nops == 3)
403 gcc_assert (tmode1 == tmode0);
404 gcc_assert (op1);
405 oprnd2 = TREE_OPERAND (exp, 2);
406 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
407 wxmode = insn_data[icode].operand[3].mode;
410 if (!wide_op)
411 wmode = wxmode = insn_data[icode].operand[0].mode;
413 if (!target
414 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
415 temp = gen_reg_rtx (wmode);
416 else
417 temp = target;
419 xop0 = op0;
420 xop1 = op1;
421 wxop = wide_op;
423 /* In case the insn wants input operands in modes different from
424 those of the actual operands, convert the operands. It would
425 seem that we don't need to convert CONST_INTs, but we do, so
426 that they're properly zero-extended, sign-extended or truncated
427 for their mode. */
429 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
430 xop0 = convert_modes (xmode0,
431 GET_MODE (op0) != VOIDmode
432 ? GET_MODE (op0)
433 : tmode0,
434 xop0, unsignedp);
436 if (op1)
437 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
438 xop1 = convert_modes (xmode1,
439 GET_MODE (op1) != VOIDmode
440 ? GET_MODE (op1)
441 : tmode1,
442 xop1, unsignedp);
444 if (wide_op)
445 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
446 wxop = convert_modes (wxmode,
447 GET_MODE (wide_op) != VOIDmode
448 ? GET_MODE (wide_op)
449 : wmode,
450 wxop, unsignedp);
452 /* Now, if insn's predicates don't allow our operands, put them into
453 pseudo regs. */
455 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
456 && xmode0 != VOIDmode)
457 xop0 = copy_to_mode_reg (xmode0, xop0);
459 if (op1)
461 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
462 && xmode1 != VOIDmode)
463 xop1 = copy_to_mode_reg (xmode1, xop1);
465 if (wide_op)
467 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
468 && wxmode != VOIDmode)
469 wxop = copy_to_mode_reg (wxmode, wxop);
471 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
473 else
474 pat = GEN_FCN (icode) (temp, xop0, xop1);
476 else
478 if (wide_op)
480 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
481 && wxmode != VOIDmode)
482 wxop = copy_to_mode_reg (wxmode, wxop);
484 pat = GEN_FCN (icode) (temp, xop0, wxop);
486 else
487 pat = GEN_FCN (icode) (temp, xop0);
490 emit_insn (pat);
491 return temp;
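/* Illustrative sketch, not part of the original file: the scalar
   semantics of the two "class 2" operations described above, using
   16-bit elements widened to 32 bits.  The element widths and function
   names are illustrative assumptions; the vector forms apply the same
   arithmetic per group of elements.  */

static int
widening_sum_example (short oprnd0, int oprnd1)
{
  /* WIDEN_SUM_EXPR: widen the narrow operand, then add the wide one.  */
  return (int) oprnd0 + oprnd1;
}

static int
widening_dot_product_example (short oprnd0, short oprnd1, int oprnd2)
{
  /* DOT_PROD_EXPR: multiply the two narrow operands in the wide mode
     and accumulate into the wide operand.  */
  return (int) oprnd0 * (int) oprnd1 + oprnd2;
}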
494 /* Generate code to perform an operation specified by TERNARY_OPTAB
495 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
497 UNSIGNEDP is for the case where we have to widen the operands
498 to perform the operation. It says to use zero-extension.
500 If TARGET is nonzero, the value
501 is generated there, if it is convenient to do so.
502 In all cases an rtx is returned for the locus of the value;
503 this may or may not be TARGET. */
505 rtx
506 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
507 rtx op1, rtx op2, rtx target, int unsignedp)
509 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
510 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
511 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
512 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
513 rtx temp;
514 rtx pat;
515 rtx xop0 = op0, xop1 = op1, xop2 = op2;
517 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
518 != CODE_FOR_nothing);
520 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
521 temp = gen_reg_rtx (mode);
522 else
523 temp = target;
525 /* In case the insn wants input operands in modes different from
526 those of the actual operands, convert the operands. It would
527 seem that we don't need to convert CONST_INTs, but we do, so
528 that they're properly zero-extended, sign-extended or truncated
529 for their mode. */
531 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
532 xop0 = convert_modes (mode0,
533 GET_MODE (op0) != VOIDmode
534 ? GET_MODE (op0)
535 : mode,
536 xop0, unsignedp);
538 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
539 xop1 = convert_modes (mode1,
540 GET_MODE (op1) != VOIDmode
541 ? GET_MODE (op1)
542 : mode,
543 xop1, unsignedp);
545 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
546 xop2 = convert_modes (mode2,
547 GET_MODE (op2) != VOIDmode
548 ? GET_MODE (op2)
549 : mode,
550 xop2, unsignedp);
552 /* Now, if insn's predicates don't allow our operands, put them into
553 pseudo regs. */
555 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
556 && mode0 != VOIDmode)
557 xop0 = copy_to_mode_reg (mode0, xop0);
559 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
560 && mode1 != VOIDmode)
561 xop1 = copy_to_mode_reg (mode1, xop1);
563 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
564 && mode2 != VOIDmode)
565 xop2 = copy_to_mode_reg (mode2, xop2);
567 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
569 emit_insn (pat);
570 return temp;
574 /* Like expand_binop, but return a constant rtx if the result can be
575 calculated at compile time. The arguments and return value are
576 otherwise the same as for expand_binop. */
578 static rtx
579 simplify_expand_binop (enum machine_mode mode, optab binoptab,
580 rtx op0, rtx op1, rtx target, int unsignedp,
581 enum optab_methods methods)
583 if (CONSTANT_P (op0) && CONSTANT_P (op1))
585 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
587 if (x)
588 return x;
591 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
594 /* Like simplify_expand_binop, but always put the result in TARGET.
595 Return true if the expansion succeeded. */
597 bool
598 force_expand_binop (enum machine_mode mode, optab binoptab,
599 rtx op0, rtx op1, rtx target, int unsignedp,
600 enum optab_methods methods)
602 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
603 target, unsignedp, methods);
604 if (x == 0)
605 return false;
606 if (x != target)
607 emit_move_insn (target, x);
608 return true;
611 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
613 rtx
614 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
616 enum insn_code icode;
617 rtx rtx_op1, rtx_op2;
618 enum machine_mode mode1;
619 enum machine_mode mode2;
620 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
621 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
622 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
623 optab shift_optab;
624 rtx pat;
626 switch (TREE_CODE (vec_shift_expr))
628 case VEC_RSHIFT_EXPR:
629 shift_optab = vec_shr_optab;
630 break;
631 case VEC_LSHIFT_EXPR:
632 shift_optab = vec_shl_optab;
633 break;
634 default:
635 gcc_unreachable ();
638 icode = (int) shift_optab->handlers[(int) mode].insn_code;
639 gcc_assert (icode != CODE_FOR_nothing);
641 mode1 = insn_data[icode].operand[1].mode;
642 mode2 = insn_data[icode].operand[2].mode;
644 rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
645 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
646 && mode1 != VOIDmode)
647 rtx_op1 = force_reg (mode1, rtx_op1);
649 rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
650 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
651 && mode2 != VOIDmode)
652 rtx_op2 = force_reg (mode2, rtx_op2);
654 if (!target
655 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
656 target = gen_reg_rtx (mode);
658 /* Emit instruction */
659 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
660 gcc_assert (pat);
661 emit_insn (pat);
663 return target;
666 /* This subroutine of expand_doubleword_shift handles the cases in which
667 the effective shift value is >= BITS_PER_WORD. The arguments and return
668 value are the same as for the parent routine, except that SUPERWORD_OP1
669 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
670 INTO_TARGET may be null if the caller has decided to calculate it. */
672 static bool
673 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
674 rtx outof_target, rtx into_target,
675 int unsignedp, enum optab_methods methods)
677 if (into_target != 0)
678 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
679 into_target, unsignedp, methods))
680 return false;
682 if (outof_target != 0)
684 /* For a signed right shift, we must fill OUTOF_TARGET with copies
685 of the sign bit, otherwise we must fill it with zeros. */
686 if (binoptab != ashr_optab)
687 emit_move_insn (outof_target, CONST0_RTX (word_mode));
688 else
689 if (!force_expand_binop (word_mode, binoptab,
690 outof_input, GEN_INT (BITS_PER_WORD - 1),
691 outof_target, unsignedp, methods))
692 return false;
694 return true;
697 /* This subroutine of expand_doubleword_shift handles the cases in which
698 the effective shift value is < BITS_PER_WORD. The arguments and return
699 value are the same as for the parent routine. */
701 static bool
702 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
703 rtx outof_input, rtx into_input, rtx op1,
704 rtx outof_target, rtx into_target,
705 int unsignedp, enum optab_methods methods,
706 unsigned HOST_WIDE_INT shift_mask)
708 optab reverse_unsigned_shift, unsigned_shift;
709 rtx tmp, carries;
711 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
712 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
714 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
715 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
716 the opposite direction to BINOPTAB. */
717 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
719 carries = outof_input;
720 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
721 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
722 0, true, methods);
724 else
726 /* We must avoid shifting by BITS_PER_WORD bits since that is either
727 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
728 has unknown behavior. Do a single shift first, then shift by the
729 remainder. It's OK to use ~OP1 as the remainder if shift counts
730 are truncated to the mode size. */
731 carries = expand_binop (word_mode, reverse_unsigned_shift,
732 outof_input, const1_rtx, 0, unsignedp, methods);
733 if (shift_mask == BITS_PER_WORD - 1)
735 tmp = immed_double_const (-1, -1, op1_mode);
736 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
737 0, true, methods);
739 else
741 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
742 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
743 0, true, methods);
746 if (tmp == 0 || carries == 0)
747 return false;
748 carries = expand_binop (word_mode, reverse_unsigned_shift,
749 carries, tmp, 0, unsignedp, methods);
750 if (carries == 0)
751 return false;
753 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
754 so the result can go directly into INTO_TARGET if convenient. */
755 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
756 into_target, unsignedp, methods);
757 if (tmp == 0)
758 return false;
760 /* Now OR in the bits carried over from OUTOF_INPUT. */
761 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
762 into_target, unsignedp, methods))
763 return false;
765 /* Use a standard word_mode shift for the out-of half. */
766 if (outof_target != 0)
767 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
768 outof_target, unsignedp, methods))
769 return false;
771 return true;
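/* Illustrative sketch, not part of the original file: the subword case
   for a 64-bit logical left shift built from 32-bit operations, with a
   variable count 0 <= N < 32.  For a left shift OUTOF is the low-order
   word and INTO the high-order word.  Shifting OUTOF right by (32 - N)
   would need a count of 32 when N is 0, which C and many targets leave
   undefined, so the carried bits are produced by a shift of 1 followed
   by a shift of (31 - N), exactly as described above.  The 32-bit word
   size and the names are illustrative assumptions.  */

static void
subword_shift_left_example (unsigned int outof_input, unsigned int into_input,
                            unsigned int n,
                            unsigned int *outof_target,
                            unsigned int *into_target)
{
  /* Bits carried from the top of the low word into the bottom of the
     high word; never shifts by exactly 32.  */
  unsigned int carries = (outof_input >> 1) >> (31 - n);

  *into_target = (into_input << n) | carries;

  /* The "out of" half is an ordinary single-word shift.  */
  *outof_target = outof_input << n;
}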
775 #ifdef HAVE_conditional_move
776 /* Try implementing expand_doubleword_shift using conditional moves.
777 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
778 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
779 are the shift counts to use in the former and latter case. All other
780 arguments are the same as the parent routine. */
782 static bool
783 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
784 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
785 rtx outof_input, rtx into_input,
786 rtx subword_op1, rtx superword_op1,
787 rtx outof_target, rtx into_target,
788 int unsignedp, enum optab_methods methods,
789 unsigned HOST_WIDE_INT shift_mask)
791 rtx outof_superword, into_superword;
793 /* Put the superword version of the output into OUTOF_SUPERWORD and
794 INTO_SUPERWORD. */
795 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
796 if (outof_target != 0 && subword_op1 == superword_op1)
798 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
799 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
800 into_superword = outof_target;
801 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
802 outof_superword, 0, unsignedp, methods))
803 return false;
805 else
807 into_superword = gen_reg_rtx (word_mode);
808 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
809 outof_superword, into_superword,
810 unsignedp, methods))
811 return false;
814 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
815 if (!expand_subword_shift (op1_mode, binoptab,
816 outof_input, into_input, subword_op1,
817 outof_target, into_target,
818 unsignedp, methods, shift_mask))
819 return false;
821 /* Select between them. Do the INTO half first because INTO_SUPERWORD
822 might be the current value of OUTOF_TARGET. */
823 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
824 into_target, into_superword, word_mode, false))
825 return false;
827 if (outof_target != 0)
828 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
829 outof_target, outof_superword,
830 word_mode, false))
831 return false;
833 return true;
835 #endif
837 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
838 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
839 input operand; the shift moves bits in the direction OUTOF_INPUT->
840 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
841 of the target. OP1 is the shift count and OP1_MODE is its mode.
842 If OP1 is constant, it will have been truncated as appropriate
843 and is known to be nonzero.
845 If SHIFT_MASK is zero, the result of word shifts is undefined when the
846 shift count is outside the range [0, BITS_PER_WORD). This routine must
847 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
849 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
850 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
851 fill with zeros or sign bits as appropriate.
853 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
854 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
855 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
856 In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
857 are undefined.
859 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
860 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
861 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
862 function wants to calculate it itself.
864 Return true if the shift could be successfully synthesized. */
866 static bool
867 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
868 rtx outof_input, rtx into_input, rtx op1,
869 rtx outof_target, rtx into_target,
870 int unsignedp, enum optab_methods methods,
871 unsigned HOST_WIDE_INT shift_mask)
873 rtx superword_op1, tmp, cmp1, cmp2;
874 rtx subword_label, done_label;
875 enum rtx_code cmp_code;
877 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
878 fill the result with sign or zero bits as appropriate. If so, the value
879 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
880 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
881 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
883 This isn't worthwhile for constant shifts since the optimizers will
884 cope better with in-range shift counts. */
885 if (shift_mask >= BITS_PER_WORD
886 && outof_target != 0
887 && !CONSTANT_P (op1))
889 if (!expand_doubleword_shift (op1_mode, binoptab,
890 outof_input, into_input, op1,
891 0, into_target,
892 unsignedp, methods, shift_mask))
893 return false;
894 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
895 outof_target, unsignedp, methods))
896 return false;
897 return true;
900 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
901 is true when the effective shift value is less than BITS_PER_WORD.
902 Set SUPERWORD_OP1 to the shift count that should be used to shift
903 OUTOF_INPUT into INTO_TARGET when the condition is false. */
904 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
905 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
907 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
908 is a subword shift count. */
909 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
910 0, true, methods);
911 cmp2 = CONST0_RTX (op1_mode);
912 cmp_code = EQ;
913 superword_op1 = op1;
915 else
917 /* Set CMP1 to OP1 - BITS_PER_WORD. */
918 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
919 0, true, methods);
920 cmp2 = CONST0_RTX (op1_mode);
921 cmp_code = LT;
922 superword_op1 = cmp1;
924 if (cmp1 == 0)
925 return false;
927 /* If we can compute the condition at compile time, pick the
928 appropriate subroutine. */
929 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
930 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
932 if (tmp == const0_rtx)
933 return expand_superword_shift (binoptab, outof_input, superword_op1,
934 outof_target, into_target,
935 unsignedp, methods);
936 else
937 return expand_subword_shift (op1_mode, binoptab,
938 outof_input, into_input, op1,
939 outof_target, into_target,
940 unsignedp, methods, shift_mask);
943 #ifdef HAVE_conditional_move
944 /* Try using conditional moves to generate straight-line code. */
946 rtx start = get_last_insn ();
947 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
948 cmp_code, cmp1, cmp2,
949 outof_input, into_input,
950 op1, superword_op1,
951 outof_target, into_target,
952 unsignedp, methods, shift_mask))
953 return true;
954 delete_insns_since (start);
956 #endif
958 /* As a last resort, use branches to select the correct alternative. */
959 subword_label = gen_label_rtx ();
960 done_label = gen_label_rtx ();
962 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
963 0, 0, subword_label);
965 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
966 outof_target, into_target,
967 unsignedp, methods))
968 return false;
970 emit_jump_insn (gen_jump (done_label));
971 emit_barrier ();
972 emit_label (subword_label);
974 if (!expand_subword_shift (op1_mode, binoptab,
975 outof_input, into_input, op1,
976 outof_target, into_target,
977 unsignedp, methods, shift_mask))
978 return false;
980 emit_label (done_label);
981 return true;
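/* Illustrative sketch, not part of the original file: the overall shape
   of the code synthesized above for a 64-bit logical left shift by a
   variable count 0 <= N < 64, using only 32-bit word operations.  The
   real routine additionally handles right shifts, sign filling,
   conditional moves and the SHIFT_COUNT_TRUNCATED variations; the
   32-bit word size and the names are illustrative assumptions.  */

static void
doubleword_shift_left_example (unsigned int low_in, unsigned int high_in,
                               unsigned int n,
                               unsigned int *low_out, unsigned int *high_out)
{
  if (n < 32)
    {
      /* Subword case: both output words depend on both inputs; the
         two-step shift avoids a count of exactly 32.  */
      *high_out = (high_in << n) | ((low_in >> 1) >> (31 - n));
      *low_out = low_in << n;
    }
  else
    {
      /* Superword case: the low word fills with zeros and the high
         word is a single shift of the low input by N - 32.  */
      *high_out = low_in << (n - 32);
      *low_out = 0;
    }
}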
984 /* Subroutine of expand_binop. Perform a double word multiplication of
985 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
986 as the target's word_mode. This function returns NULL_RTX if anything
987 goes wrong, in which case it may have already emitted instructions
988 which need to be deleted.
990 If we want to multiply two two-word values and have normal and widening
991 multiplies of single-word values, we can do this with three smaller
992 multiplications. Note that we do not make a REG_NO_CONFLICT block here
993 because we are not operating on one word at a time.
995 The multiplication proceeds as follows:
996 _______________________
997 [__op0_high_|__op0_low__]
998 _______________________
999 * [__op1_high_|__op1_low__]
1000 _______________________________________________
1001 _______________________
1002 (1) [__op0_low__*__op1_low__]
1003 _______________________
1004 (2a) [__op0_low__*__op1_high_]
1005 _______________________
1006 (2b) [__op0_high_*__op1_low__]
1007 _______________________
1008 (3) [__op0_high_*__op1_high_]
1011 This gives a 4-word result. Since we are only interested in the
1012 lower 2 words, partial result (3) and the upper words of (2a) and
1013 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1014 calculated using non-widening multiplication.
1016 (1), however, needs to be calculated with an unsigned widening
1017 multiplication. If this operation is not directly supported we
1018 try using a signed widening multiplication and adjust the result.
1019 This adjustment works as follows:
1021 If both operands are positive then no adjustment is needed.
1023 If the operands have different signs, for example op0_low < 0 and
1024 op1_low >= 0, the instruction treats the most significant bit of
1025 op0_low as a sign bit instead of a bit with significance
1026 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1027 with 2**BITS_PER_WORD - op0_low, and two's complements the
1028 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1029 the result.
1031 Similarly, if both operands are negative, we need to add
1032 (op0_low + op1_low) * 2**BITS_PER_WORD.
1034 We use a trick to adjust quickly. We logically shift op0_low right
1035 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1036 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1037 logical shift exists, we do an arithmetic right shift and subtract
1038 the 0 or -1. */
1040 static rtx
1041 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1042 bool umulp, enum optab_methods methods)
1044 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1045 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1046 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1047 rtx product, adjust, product_high, temp;
1049 rtx op0_high = operand_subword_force (op0, high, mode);
1050 rtx op0_low = operand_subword_force (op0, low, mode);
1051 rtx op1_high = operand_subword_force (op1, high, mode);
1052 rtx op1_low = operand_subword_force (op1, low, mode);
1054 /* If we're using an unsigned multiply to directly compute the product
1055 of the low-order words of the operands and perform any required
1056 adjustments of the operands, we begin by trying two more multiplications
1057 and then computing the appropriate sum.
1059 We have checked above that the required addition is provided.
1060 Full-word addition will normally always succeed, especially if
1061 it is provided at all, so we don't worry about its failure. The
1062 multiplication may well fail, however, so we do handle that. */
1064 if (!umulp)
1066 /* ??? This could be done with emit_store_flag where available. */
1067 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1068 NULL_RTX, 1, methods);
1069 if (temp)
1070 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1071 NULL_RTX, 0, OPTAB_DIRECT);
1072 else
1074 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1075 NULL_RTX, 0, methods);
1076 if (!temp)
1077 return NULL_RTX;
1078 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1079 NULL_RTX, 0, OPTAB_DIRECT);
1082 if (!op0_high)
1083 return NULL_RTX;
1086 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1087 NULL_RTX, 0, OPTAB_DIRECT);
1088 if (!adjust)
1089 return NULL_RTX;
1091 /* OP0_HIGH should now be dead. */
1093 if (!umulp)
1095 /* ??? This could be done with emit_store_flag where available. */
1096 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1097 NULL_RTX, 1, methods);
1098 if (temp)
1099 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1100 NULL_RTX, 0, OPTAB_DIRECT);
1101 else
1103 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1104 NULL_RTX, 0, methods);
1105 if (!temp)
1106 return NULL_RTX;
1107 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1108 NULL_RTX, 0, OPTAB_DIRECT);
1111 if (!op1_high)
1112 return NULL_RTX;
1115 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1116 NULL_RTX, 0, OPTAB_DIRECT);
1117 if (!temp)
1118 return NULL_RTX;
1120 /* OP1_HIGH should now be dead. */
1122 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1123 adjust, 0, OPTAB_DIRECT);
1125 if (target && !REG_P (target))
1126 target = NULL_RTX;
1128 if (umulp)
1129 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1130 target, 1, OPTAB_DIRECT);
1131 else
1132 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1133 target, 1, OPTAB_DIRECT);
1135 if (!product)
1136 return NULL_RTX;
1138 product_high = operand_subword (product, high, 1, mode);
1139 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1140 REG_P (product_high) ? product_high : adjust,
1141 0, OPTAB_DIRECT);
1142 emit_move_insn (product_high, adjust);
1143 return product;
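/* Illustrative sketch, not part of the original file: the
   three-multiplication scheme described above, written out for a 64-bit
   product computed from 32-bit words when an unsigned widening multiply
   is available (the UMULP case, so no sign adjustments are needed).
   The 32-bit word size and the name are illustrative assumptions.  */

static unsigned long long
doubleword_mult_example (unsigned long long op0, unsigned long long op1)
{
  unsigned int op0_low = (unsigned int) op0;
  unsigned int op0_high = (unsigned int) (op0 >> 32);
  unsigned int op1_low = (unsigned int) op1;
  unsigned int op1_high = (unsigned int) (op1 >> 32);

  /* (1): the only widening multiply; it yields the full low product.  */
  unsigned long long product = (unsigned long long) op0_low * op1_low;

  /* (2a) and (2b): non-widening multiplies; only their low words can
     reach the low 64 bits of the result, and they land in the high word
     of the final product.  Partial product (3) is discarded entirely.  */
  unsigned int adjust = op0_high * op1_low + op0_low * op1_high;

  return product + ((unsigned long long) adjust << 32);
}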
1146 /* Wrapper around expand_binop which takes an rtx code to specify
1147 the operation to perform, not an optab pointer. All other
1148 arguments are the same. */
1149 rtx
1150 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1151 rtx op1, rtx target, int unsignedp,
1152 enum optab_methods methods)
1154 optab binop = code_to_optab[(int) code];
1155 gcc_assert (binop);
1157 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
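/* Illustrative usage sketch, not part of the original file: a
   hypothetical caller (a target expander, say) synthesizing
   TARGET = B + (C << 2) in SImode through the wrapper above.  The
   function name is made up; the interfaces used are the ones defined or
   declared for this file.  */

static rtx
example_shift_add (rtx b, rtx c, rtx target)
{
  rtx tmp = expand_simple_binop (SImode, ASHIFT, c, GEN_INT (2),
                                 NULL_RTX, 1, OPTAB_LIB_WIDEN);

  return expand_simple_binop (SImode, PLUS, b, tmp, target, 0,
                              OPTAB_LIB_WIDEN);
}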
1160 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1161 binop. Order them according to commutative_operand_precedence and, if
1162 possible, try to put TARGET or a pseudo first. */
1163 static bool
1164 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1166 int op0_prec = commutative_operand_precedence (op0);
1167 int op1_prec = commutative_operand_precedence (op1);
1169 if (op0_prec < op1_prec)
1170 return true;
1172 if (op0_prec > op1_prec)
1173 return false;
1175 /* With equal precedence, both orders are ok, but it is better if the
1176 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1177 if (target == 0 || REG_P (target))
1178 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1179 else
1180 return rtx_equal_p (op1, target);
1184 /* Generate code to perform an operation specified by BINOPTAB
1185 on operands OP0 and OP1, with result having machine-mode MODE.
1187 UNSIGNEDP is for the case where we have to widen the operands
1188 to perform the operation. It says to use zero-extension.
1190 If TARGET is nonzero, the value
1191 is generated there, if it is convenient to do so.
1192 In all cases an rtx is returned for the locus of the value;
1193 this may or may not be TARGET. */
1195 rtx
1196 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1197 rtx target, int unsignedp, enum optab_methods methods)
1199 enum optab_methods next_methods
1200 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1201 ? OPTAB_WIDEN : methods);
1202 enum mode_class class;
1203 enum machine_mode wider_mode;
1204 rtx temp;
1205 int commutative_op = 0;
1206 int shift_op = (binoptab->code == ASHIFT
1207 || binoptab->code == ASHIFTRT
1208 || binoptab->code == LSHIFTRT
1209 || binoptab->code == ROTATE
1210 || binoptab->code == ROTATERT);
1211 rtx entry_last = get_last_insn ();
1212 rtx last;
1213 bool first_pass_p = true;
1215 class = GET_MODE_CLASS (mode);
1217 /* If subtracting an integer constant, convert this into an addition of
1218 the negated constant. */
1220 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1222 op1 = negate_rtx (mode, op1);
1223 binoptab = add_optab;
1226 /* If we are inside an appropriately-short loop and we are optimizing,
1227 force expensive constants into a register. */
1228 if (CONSTANT_P (op0) && optimize
1229 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1231 if (GET_MODE (op0) != VOIDmode)
1232 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1233 op0 = force_reg (mode, op0);
1236 if (CONSTANT_P (op1) && optimize
1237 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1239 if (GET_MODE (op1) != VOIDmode)
1240 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1241 op1 = force_reg (mode, op1);
1244 /* Record where to delete back to if we backtrack. */
1245 last = get_last_insn ();
1247 /* If operation is commutative,
1248 try to make the first operand a register.
1249 Even better, try to make it the same as the target.
1250 Also try to make the last operand a constant. */
1251 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1252 || binoptab == smul_widen_optab
1253 || binoptab == umul_widen_optab
1254 || binoptab == smul_highpart_optab
1255 || binoptab == umul_highpart_optab)
1257 commutative_op = 1;
1259 if (swap_commutative_operands_with_target (target, op0, op1))
1261 temp = op1;
1262 op1 = op0;
1263 op0 = temp;
1267 retry:
1269 /* If we can do it with a three-operand insn, do so. */
1271 if (methods != OPTAB_MUST_WIDEN
1272 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1274 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1275 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1276 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1277 rtx pat;
1278 rtx xop0 = op0, xop1 = op1;
1280 if (target)
1281 temp = target;
1282 else
1283 temp = gen_reg_rtx (mode);
1285 /* If it is a commutative operator and the modes would match
1286 if we would swap the operands, we can save the conversions. */
1287 if (commutative_op)
1289 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1290 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1292 rtx tmp;
1294 tmp = op0; op0 = op1; op1 = tmp;
1295 tmp = xop0; xop0 = xop1; xop1 = tmp;
1299 /* In case the insn wants input operands in modes different from
1300 those of the actual operands, convert the operands. It would
1301 seem that we don't need to convert CONST_INTs, but we do, so
1302 that they're properly zero-extended, sign-extended or truncated
1303 for their mode. */
1305 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1306 xop0 = convert_modes (mode0,
1307 GET_MODE (op0) != VOIDmode
1308 ? GET_MODE (op0)
1309 : mode,
1310 xop0, unsignedp);
1312 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1313 xop1 = convert_modes (mode1,
1314 GET_MODE (op1) != VOIDmode
1315 ? GET_MODE (op1)
1316 : mode,
1317 xop1, unsignedp);
1319 /* Now, if insn's predicates don't allow our operands, put them into
1320 pseudo regs. */
1322 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1323 && mode0 != VOIDmode)
1324 xop0 = copy_to_mode_reg (mode0, xop0);
1326 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1327 && mode1 != VOIDmode)
1328 xop1 = copy_to_mode_reg (mode1, xop1);
1330 if (!insn_data[icode].operand[0].predicate (temp, mode))
1331 temp = gen_reg_rtx (mode);
1333 pat = GEN_FCN (icode) (temp, xop0, xop1);
1334 if (pat)
1336 /* If PAT is composed of more than one insn, try to add an appropriate
1337 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1338 operand, call ourselves again, this time without a target. */
1339 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1340 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1342 delete_insns_since (last);
1343 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1344 unsignedp, methods);
1347 emit_insn (pat);
1348 return temp;
1350 else
1351 delete_insns_since (last);
1354 /* If we were trying to rotate by a constant value, and that didn't
1355 work, try rotating the other direction before falling back to
1356 shifts and bitwise-or. */
1357 if (first_pass_p
1358 && (binoptab == rotl_optab || binoptab == rotr_optab)
1359 && class == MODE_INT
1360 && GET_CODE (op1) == CONST_INT
1361 && INTVAL (op1) > 0
1362 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1364 first_pass_p = false;
1365 op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
1366 binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
1367 goto retry;
1370 /* If this is a multiply, see if we can do a widening operation that
1371 takes operands of this mode and makes a wider mode. */
1373 if (binoptab == smul_optab
1374 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1375 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1376 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1377 != CODE_FOR_nothing))
1379 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1380 unsignedp ? umul_widen_optab : smul_widen_optab,
1381 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1383 if (temp != 0)
1385 if (GET_MODE_CLASS (mode) == MODE_INT
1386 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1387 GET_MODE_BITSIZE (GET_MODE (temp))))
1388 return gen_lowpart (mode, temp);
1389 else
1390 return convert_to_mode (mode, temp, unsignedp);
1394 /* Look for a wider mode of the same class for which we think we
1395 can open-code the operation. Check for a widening multiply at the
1396 wider mode as well. */
1398 if (CLASS_HAS_WIDER_MODES_P (class)
1399 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1400 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1401 wider_mode != VOIDmode;
1402 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1404 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1405 || (binoptab == smul_optab
1406 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1407 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1408 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1409 != CODE_FOR_nothing)))
1411 rtx xop0 = op0, xop1 = op1;
1412 int no_extend = 0;
1414 /* For certain integer operations, we need not actually extend
1415 the narrow operands, as long as we will truncate
1416 the results to the same narrowness. */
1418 if ((binoptab == ior_optab || binoptab == and_optab
1419 || binoptab == xor_optab
1420 || binoptab == add_optab || binoptab == sub_optab
1421 || binoptab == smul_optab || binoptab == ashl_optab)
1422 && class == MODE_INT)
1423 no_extend = 1;
1425 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1427 /* The second operand of a shift must always be extended. */
1428 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1429 no_extend && binoptab != ashl_optab);
1431 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1432 unsignedp, OPTAB_DIRECT);
1433 if (temp)
1435 if (class != MODE_INT
1436 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1437 GET_MODE_BITSIZE (wider_mode)))
1439 if (target == 0)
1440 target = gen_reg_rtx (mode);
1441 convert_move (target, temp, 0);
1442 return target;
1444 else
1445 return gen_lowpart (mode, temp);
1447 else
1448 delete_insns_since (last);
1452 /* These can be done a word at a time. */
1453 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1454 && class == MODE_INT
1455 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1456 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1458 int i;
1459 rtx insns;
1460 rtx equiv_value;
1462 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1463 won't be accurate, so use a new target. */
1464 if (target == 0 || target == op0 || target == op1)
1465 target = gen_reg_rtx (mode);
1467 start_sequence ();
1469 /* Do the actual arithmetic. */
1470 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1472 rtx target_piece = operand_subword (target, i, 1, mode);
1473 rtx x = expand_binop (word_mode, binoptab,
1474 operand_subword_force (op0, i, mode),
1475 operand_subword_force (op1, i, mode),
1476 target_piece, unsignedp, next_methods);
1478 if (x == 0)
1479 break;
1481 if (target_piece != x)
1482 emit_move_insn (target_piece, x);
1485 insns = get_insns ();
1486 end_sequence ();
1488 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1490 if (binoptab->code != UNKNOWN)
1491 equiv_value
1492 = gen_rtx_fmt_ee (binoptab->code, mode,
1493 copy_rtx (op0), copy_rtx (op1));
1494 else
1495 equiv_value = 0;
1497 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1498 return target;
1502 /* Synthesize double word shifts from single word shifts. */
1503 if ((binoptab == lshr_optab || binoptab == ashl_optab
1504 || binoptab == ashr_optab)
1505 && class == MODE_INT
1506 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1507 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1508 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1509 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1510 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1512 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1513 enum machine_mode op1_mode;
1515 double_shift_mask = targetm.shift_truncation_mask (mode);
1516 shift_mask = targetm.shift_truncation_mask (word_mode);
1517 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1519 /* Apply the truncation to constant shifts. */
1520 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1521 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1523 if (op1 == CONST0_RTX (op1_mode))
1524 return op0;
1526 /* Make sure that this is a combination that expand_doubleword_shift
1527 can handle. See the comments there for details. */
1528 if (double_shift_mask == 0
1529 || (shift_mask == BITS_PER_WORD - 1
1530 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1532 rtx insns, equiv_value;
1533 rtx into_target, outof_target;
1534 rtx into_input, outof_input;
1535 int left_shift, outof_word;
1537 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1538 won't be accurate, so use a new target. */
1539 if (target == 0 || target == op0 || target == op1)
1540 target = gen_reg_rtx (mode);
1542 start_sequence ();
1544 /* OUTOF_* is the word we are shifting bits away from, and
1545 INTO_* is the word that we are shifting bits towards, thus
1546 they differ depending on the direction of the shift and
1547 WORDS_BIG_ENDIAN. */
1549 left_shift = binoptab == ashl_optab;
1550 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1552 outof_target = operand_subword (target, outof_word, 1, mode);
1553 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1555 outof_input = operand_subword_force (op0, outof_word, mode);
1556 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1558 if (expand_doubleword_shift (op1_mode, binoptab,
1559 outof_input, into_input, op1,
1560 outof_target, into_target,
1561 unsignedp, methods, shift_mask))
1563 insns = get_insns ();
1564 end_sequence ();
1566 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1567 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1568 return target;
1570 end_sequence ();
1574 /* Synthesize double word rotates from single word shifts. */
1575 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1576 && class == MODE_INT
1577 && GET_CODE (op1) == CONST_INT
1578 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1579 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1580 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1582 rtx insns;
1583 rtx into_target, outof_target;
1584 rtx into_input, outof_input;
1585 rtx inter;
1586 int shift_count, left_shift, outof_word;
1588 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1589 won't be accurate, so use a new target. Do this also if target is not
1590 a REG, first because having a register instead may open optimization
1591 opportunities, and second because if target and op0 happen to be MEMs
1592 designating the same location, we would risk clobbering it too early
1593 in the code sequence we generate below. */
1594 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1595 target = gen_reg_rtx (mode);
1597 start_sequence ();
1599 shift_count = INTVAL (op1);
1601 /* OUTOF_* is the word we are shifting bits away from, and
1602 INTO_* is the word that we are shifting bits towards, thus
1603 they differ depending on the direction of the shift and
1604 WORDS_BIG_ENDIAN. */
1606 left_shift = (binoptab == rotl_optab);
1607 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1609 outof_target = operand_subword (target, outof_word, 1, mode);
1610 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1612 outof_input = operand_subword_force (op0, outof_word, mode);
1613 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1615 if (shift_count == BITS_PER_WORD)
1617 /* This is just a word swap. */
1618 emit_move_insn (outof_target, into_input);
1619 emit_move_insn (into_target, outof_input);
1620 inter = const0_rtx;
1622 else
1624 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1625 rtx first_shift_count, second_shift_count;
1626 optab reverse_unsigned_shift, unsigned_shift;
1628 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1629 ? lshr_optab : ashl_optab);
1631 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1632 ? ashl_optab : lshr_optab);
1634 if (shift_count > BITS_PER_WORD)
1636 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1637 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1639 else
1641 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1642 second_shift_count = GEN_INT (shift_count);
1645 into_temp1 = expand_binop (word_mode, unsigned_shift,
1646 outof_input, first_shift_count,
1647 NULL_RTX, unsignedp, next_methods);
1648 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1649 into_input, second_shift_count,
1650 NULL_RTX, unsignedp, next_methods);
1652 if (into_temp1 != 0 && into_temp2 != 0)
1653 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1654 into_target, unsignedp, next_methods);
1655 else
1656 inter = 0;
1658 if (inter != 0 && inter != into_target)
1659 emit_move_insn (into_target, inter);
1661 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1662 into_input, first_shift_count,
1663 NULL_RTX, unsignedp, next_methods);
1664 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1665 outof_input, second_shift_count,
1666 NULL_RTX, unsignedp, next_methods);
1668 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1669 inter = expand_binop (word_mode, ior_optab,
1670 outof_temp1, outof_temp2,
1671 outof_target, unsignedp, next_methods);
1673 if (inter != 0 && inter != outof_target)
1674 emit_move_insn (outof_target, inter);
1677 insns = get_insns ();
1678 end_sequence ();
1680 if (inter != 0)
1682 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1683 block to help the register allocator a bit. But a multi-word
1684 rotate will need all the input bits when setting the output
1685 bits, so there clearly is a conflict between the input and
1686 output registers. So we can't use a no-conflict block here. */
1687 emit_insn (insns);
1688 return target;
1692 /* These can be done a word at a time by propagating carries. */
1693 if ((binoptab == add_optab || binoptab == sub_optab)
1694 && class == MODE_INT
1695 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1696 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1698 unsigned int i;
1699 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1700 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1701 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1702 rtx xop0, xop1, xtarget;
1704 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1705 value is one of those, use it. Otherwise, use 1 since it is the
1706 one easiest to get. */
1707 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1708 int normalizep = STORE_FLAG_VALUE;
1709 #else
1710 int normalizep = 1;
1711 #endif
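      /* NORMALIZEP is the value emit_store_flag_force is asked to produce
         for a true comparison; matching the target's natural
         STORE_FLAG_VALUE avoids an extra normalization instruction.  */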
1713 /* Prepare the operands. */
1714 xop0 = force_reg (mode, op0);
1715 xop1 = force_reg (mode, op1);
1717 xtarget = gen_reg_rtx (mode);
1719 if (target == 0 || !REG_P (target))
1720 target = xtarget;
1722 /* Indicate for flow that the entire target reg is being set. */
1723 if (REG_P (target))
1724 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1726 /* Do the actual arithmetic. */
1727 for (i = 0; i < nwords; i++)
1729 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1730 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1731 rtx op0_piece = operand_subword_force (xop0, index, mode);
1732 rtx op1_piece = operand_subword_force (xop1, index, mode);
1733 rtx x;
1735 /* Main add/subtract of the input operands. */
1736 x = expand_binop (word_mode, binoptab,
1737 op0_piece, op1_piece,
1738 target_piece, unsignedp, next_methods);
1739 if (x == 0)
1740 break;
1742 if (i + 1 < nwords)
1744 /* Store carry from main add/subtract. */
1745 carry_out = gen_reg_rtx (word_mode);
1746 carry_out = emit_store_flag_force (carry_out,
1747 (binoptab == add_optab
1748 ? LT : GT),
1749 x, op0_piece,
1750 word_mode, 1, normalizep);
1753 if (i > 0)
1755 rtx newx;
1757 /* Add/subtract previous carry to main result. */
1758 newx = expand_binop (word_mode,
1759 normalizep == 1 ? binoptab : otheroptab,
1760 x, carry_in,
1761 NULL_RTX, 1, next_methods);
1763 if (i + 1 < nwords)
1765 /* Get the carry out from adding/subtracting the carry in. */
1766 rtx carry_tmp = gen_reg_rtx (word_mode);
1767 carry_tmp = emit_store_flag_force (carry_tmp,
1768 (binoptab == add_optab
1769 ? LT : GT),
1770 newx, x,
1771 word_mode, 1, normalizep);
1773 /* Logical-ior the two possible carries together. */
1774 carry_out = expand_binop (word_mode, ior_optab,
1775 carry_out, carry_tmp,
1776 carry_out, 0, next_methods);
1777 if (carry_out == 0)
1778 break;
1780 emit_move_insn (target_piece, newx);
1782 else
1784 if (x != target_piece)
1785 emit_move_insn (target_piece, x);
1788 carry_in = carry_out;
1791 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1793 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1794 || ! rtx_equal_p (target, xtarget))
1796 rtx temp = emit_move_insn (target, xtarget);
1798 set_unique_reg_note (temp,
1799 REG_EQUAL,
1800 gen_rtx_fmt_ee (binoptab->code, mode,
1801 copy_rtx (xop0),
1802 copy_rtx (xop1)));
1804 else
1805 target = xtarget;
1807 return target;
1810 else
1811 delete_insns_since (last);
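/* Illustration (editor's sketch, not part of the original file): the
   word-at-a-time add/subtract above recovers each carry with a
   store-flag comparison -- for addition, carry = (sum < operand), which
   is what the emit_store_flag_force call with LT computes.  A C sketch
   for two 32-bit words, with hypothetical names:  */
static void
add_doubleword_sketch (unsigned int a_lo, unsigned int a_hi,
                       unsigned int b_lo, unsigned int b_hi,
                       unsigned int *r_lo, unsigned int *r_hi)
{
  unsigned int lo = a_lo + b_lo;
  unsigned int carry = lo < a_lo;   /* carry out of the low word */

  /* With more than two words, adding the incoming carry can itself carry
     out, which is why the code above IORs two store-flag results; here
     the high word simply absorbs it.  */
  *r_lo = lo;
  *r_hi = a_hi + b_hi + carry;
}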
1814 /* Attempt to synthesize double word multiplies using a sequence of word
1815 mode multiplications. We first attempt to generate a sequence using a
1816 more efficient unsigned widening multiply, and if that fails we then
1817 try using a signed widening multiply. */
1819 if (binoptab == smul_optab
1820 && class == MODE_INT
1821 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1822 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1823 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1825 rtx product = NULL_RTX;
1827 if (umul_widen_optab->handlers[(int) mode].insn_code
1828 != CODE_FOR_nothing)
1830 product = expand_doubleword_mult (mode, op0, op1, target,
1831 true, methods);
1832 if (!product)
1833 delete_insns_since (last);
1836 if (product == NULL_RTX
1837 && smul_widen_optab->handlers[(int) mode].insn_code
1838 != CODE_FOR_nothing)
1840 product = expand_doubleword_mult (mode, op0, op1, target,
1841 false, methods);
1842 if (!product)
1843 delete_insns_since (last);
1846 if (product != NULL_RTX)
1848 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1850 temp = emit_move_insn (target ? target : product, product);
1851 set_unique_reg_note (temp,
1852 REG_EQUAL,
1853 gen_rtx_fmt_ee (MULT, mode,
1854 copy_rtx (op0),
1855 copy_rtx (op1)));
1857 return product;
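/* Illustration (editor's sketch, not part of the original file): the
   expand_doubleword_mult calls above decompose the product into
   word-mode multiplies.  The arithmetic they rely on, written in C for
   32-bit words (the fixed widths and names are only for the example):  */
static unsigned long long
mul_doubleword_sketch (unsigned long long x, unsigned long long y)
{
  unsigned int x_lo = (unsigned int) x, x_hi = (unsigned int) (x >> 32);
  unsigned int y_lo = (unsigned int) y, y_hi = (unsigned int) (y >> 32);

  /* One widening multiply for the low parts ...  */
  unsigned long long low = (unsigned long long) x_lo * y_lo;
  /* ... and the two cross products, which only affect the high word.  */
  unsigned int cross = x_hi * y_lo + x_lo * y_hi;

  return low + ((unsigned long long) cross << 32);
}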
1861 /* It can't be open-coded in this mode.
1862 Use a library call if one is available and caller says that's ok. */
1864 if (binoptab->handlers[(int) mode].libfunc
1865 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1867 rtx insns;
1868 rtx op1x = op1;
1869 enum machine_mode op1_mode = mode;
1870 rtx value;
1872 start_sequence ();
1874 if (shift_op)
1876 op1_mode = word_mode;
1877 /* Specify unsigned here,
1878 since negative shift counts are meaningless. */
1879 op1x = convert_to_mode (word_mode, op1, 1);
1882 if (GET_MODE (op0) != VOIDmode
1883 && GET_MODE (op0) != mode)
1884 op0 = convert_to_mode (mode, op0, unsignedp);
1886 /* Pass 1 for NO_QUEUE so we don't lose any increments
1887 if the libcall is cse'd or moved. */
1888 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1889 NULL_RTX, LCT_CONST, mode, 2,
1890 op0, mode, op1x, op1_mode);
1892 insns = get_insns ();
1893 end_sequence ();
1895 target = gen_reg_rtx (mode);
1896 emit_libcall_block (insns, target, value,
1897 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1899 return target;
1902 delete_insns_since (last);
1904 /* It can't be done in this mode. Can we do it in a wider mode? */
1906 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1907 || methods == OPTAB_MUST_WIDEN))
1909 /* Caller says, don't even try. */
1910 delete_insns_since (entry_last);
1911 return 0;
1914 /* Compute the value of METHODS to pass to recursive calls.
1915 Don't allow widening to be tried recursively. */
1917 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1919 /* Look for a wider mode of the same class for which it appears we can do
1920 the operation. */
1922 if (CLASS_HAS_WIDER_MODES_P (class))
1924 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1925 wider_mode != VOIDmode;
1926 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1928 if ((binoptab->handlers[(int) wider_mode].insn_code
1929 != CODE_FOR_nothing)
1930 || (methods == OPTAB_LIB
1931 && binoptab->handlers[(int) wider_mode].libfunc))
1933 rtx xop0 = op0, xop1 = op1;
1934 int no_extend = 0;
1936 /* For certain integer operations, we need not actually extend
1937 the narrow operands, as long as we will truncate
1938 the results to the same narrowness. */
1940 if ((binoptab == ior_optab || binoptab == and_optab
1941 || binoptab == xor_optab
1942 || binoptab == add_optab || binoptab == sub_optab
1943 || binoptab == smul_optab || binoptab == ashl_optab)
1944 && class == MODE_INT)
1945 no_extend = 1;
1947 xop0 = widen_operand (xop0, wider_mode, mode,
1948 unsignedp, no_extend);
1950 /* The second operand of a shift must always be extended. */
1951 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1952 no_extend && binoptab != ashl_optab);
1954 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1955 unsignedp, methods);
1956 if (temp)
1958 if (class != MODE_INT
1959 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1960 GET_MODE_BITSIZE (wider_mode)))
1962 if (target == 0)
1963 target = gen_reg_rtx (mode);
1964 convert_move (target, temp, 0);
1965 return target;
1967 else
1968 return gen_lowpart (mode, temp);
1970 else
1971 delete_insns_since (last);
1976 delete_insns_since (entry_last);
1977 return 0;
1980 /* Expand a binary operator which has both signed and unsigned forms.
1981 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1982 signed operations.
1984 If we widen unsigned operands, we may use a signed wider operation instead
1985 of an unsigned wider operation, since the result would be the same. */
1988 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
1989 rtx op0, rtx op1, rtx target, int unsignedp,
1990 enum optab_methods methods)
1992 rtx temp;
1993 optab direct_optab = unsignedp ? uoptab : soptab;
1994 struct optab wide_soptab;
1996 /* Do it without widening, if possible. */
1997 temp = expand_binop (mode, direct_optab, op0, op1, target,
1998 unsignedp, OPTAB_DIRECT);
1999 if (temp || methods == OPTAB_DIRECT)
2000 return temp;
2002 /* Try widening to a signed int. Make a fake signed optab that
2003 hides any signed insn for direct use. */
2004 wide_soptab = *soptab;
2005 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2006 wide_soptab.handlers[(int) mode].libfunc = 0;
2008 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2009 unsignedp, OPTAB_WIDEN);
2011 /* For unsigned operands, try widening to an unsigned int. */
2012 if (temp == 0 && unsignedp)
2013 temp = expand_binop (mode, uoptab, op0, op1, target,
2014 unsignedp, OPTAB_WIDEN);
2015 if (temp || methods == OPTAB_WIDEN)
2016 return temp;
2018 /* Use a library call of the right width if one exists. */
2019 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2020 if (temp || methods == OPTAB_LIB)
2021 return temp;
2023 /* Must widen and use a library call; use either the signed or unsigned one. */
2024 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2025 unsignedp, methods);
2026 if (temp != 0)
2027 return temp;
2028 if (unsignedp)
2029 return expand_binop (mode, uoptab, op0, op1, target,
2030 unsignedp, methods);
2031 return 0;
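/* Illustration (editor's sketch, not part of the original file): a signed
   wider operation is acceptable for zero-extended unsigned operands
   because only the low bits of the result are kept, and operations such
   as addition, subtraction and multiplication agree on those low bits
   modulo 2^N.  A C sketch with 8-bit operands widened to 16 bits:  */
static unsigned char
add_u8_via_s16_sketch (unsigned char a, unsigned char b)
{
  short wide = (short) a + (short) b;   /* signed add of zero-extended values */
  return (unsigned char) wide;          /* same low byte as an 8-bit unsigned add */
}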
2034 /* Generate code to perform an operation specified by UNOPTAB
2035 on operand OP0, with two results to TARG0 and TARG1.
2036 We assume that the order of the operands for the instruction
2037 is TARG0, TARG1, OP0.
2039 Either TARG0 or TARG1 may be zero, but what that means is that
2040 the result is not actually wanted. We will generate it into
2041 a dummy pseudo-reg and discard it. They may not both be zero.
2043 Returns 1 if this operation can be performed; 0 if not. */
2046 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2047 int unsignedp)
2049 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2050 enum mode_class class;
2051 enum machine_mode wider_mode;
2052 rtx entry_last = get_last_insn ();
2053 rtx last;
2055 class = GET_MODE_CLASS (mode);
2057 if (!targ0)
2058 targ0 = gen_reg_rtx (mode);
2059 if (!targ1)
2060 targ1 = gen_reg_rtx (mode);
2062 /* Record where to go back to if we fail. */
2063 last = get_last_insn ();
2065 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2067 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2068 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2069 rtx pat;
2070 rtx xop0 = op0;
2072 if (GET_MODE (xop0) != VOIDmode
2073 && GET_MODE (xop0) != mode0)
2074 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2076 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2077 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2078 xop0 = copy_to_mode_reg (mode0, xop0);
2080 /* We could handle this, but we should always be called with a pseudo
2081 for our targets and all insns should take them as outputs. */
2082 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2083 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2085 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2086 if (pat)
2088 emit_insn (pat);
2089 return 1;
2091 else
2092 delete_insns_since (last);
2095 /* It can't be done in this mode. Can we do it in a wider mode? */
2097 if (CLASS_HAS_WIDER_MODES_P (class))
2099 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2100 wider_mode != VOIDmode;
2101 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2103 if (unoptab->handlers[(int) wider_mode].insn_code
2104 != CODE_FOR_nothing)
2106 rtx t0 = gen_reg_rtx (wider_mode);
2107 rtx t1 = gen_reg_rtx (wider_mode);
2108 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2110 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2112 convert_move (targ0, t0, unsignedp);
2113 convert_move (targ1, t1, unsignedp);
2114 return 1;
2116 else
2117 delete_insns_since (last);
2122 delete_insns_since (entry_last);
2123 return 0;
2126 /* Generate code to perform an operation specified by BINOPTAB
2127 on operands OP0 and OP1, with two results to TARG0 and TARG1.
2128 We assume that the order of the operands for the instruction
2129 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2130 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2132 Either TARG0 or TARG1 may be zero, but what that means is that
2133 the result is not actually wanted. We will generate it into
2134 a dummy pseudo-reg and discard it. They may not both be zero.
2136 Returns 1 if this operation can be performed; 0 if not. */
2139 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2140 int unsignedp)
2142 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2143 enum mode_class class;
2144 enum machine_mode wider_mode;
2145 rtx entry_last = get_last_insn ();
2146 rtx last;
2148 class = GET_MODE_CLASS (mode);
2150 /* If we are inside an appropriately-short loop and we are optimizing,
2151 force expensive constants into a register. */
2152 if (CONSTANT_P (op0) && optimize
2153 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2154 op0 = force_reg (mode, op0);
2156 if (CONSTANT_P (op1) && optimize
2157 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2158 op1 = force_reg (mode, op1);
2160 if (!targ0)
2161 targ0 = gen_reg_rtx (mode);
2162 if (!targ1)
2163 targ1 = gen_reg_rtx (mode);
2165 /* Record where to go back to if we fail. */
2166 last = get_last_insn ();
2168 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2170 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2171 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2172 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2173 rtx pat;
2174 rtx xop0 = op0, xop1 = op1;
2176 /* In case the insn wants input operands in modes different from
2177 those of the actual operands, convert the operands. It would
2178 seem that we don't need to convert CONST_INTs, but we do, so
2179 that they're properly zero-extended, sign-extended or truncated
2180 for their mode. */
2182 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2183 xop0 = convert_modes (mode0,
2184 GET_MODE (op0) != VOIDmode
2185 ? GET_MODE (op0)
2186 : mode,
2187 xop0, unsignedp);
2189 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2190 xop1 = convert_modes (mode1,
2191 GET_MODE (op1) != VOIDmode
2192 ? GET_MODE (op1)
2193 : mode,
2194 xop1, unsignedp);
2196 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2197 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2198 xop0 = copy_to_mode_reg (mode0, xop0);
2200 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2201 xop1 = copy_to_mode_reg (mode1, xop1);
2203 /* We could handle this, but we should always be called with a pseudo
2204 for our targets and all insns should take them as outputs. */
2205 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2206 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2208 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2209 if (pat)
2211 emit_insn (pat);
2212 return 1;
2214 else
2215 delete_insns_since (last);
2218 /* It can't be done in this mode. Can we do it in a wider mode? */
2220 if (CLASS_HAS_WIDER_MODES_P (class))
2222 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2223 wider_mode != VOIDmode;
2224 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2226 if (binoptab->handlers[(int) wider_mode].insn_code
2227 != CODE_FOR_nothing)
2229 rtx t0 = gen_reg_rtx (wider_mode);
2230 rtx t1 = gen_reg_rtx (wider_mode);
2231 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2232 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2234 if (expand_twoval_binop (binoptab, cop0, cop1,
2235 t0, t1, unsignedp))
2237 convert_move (targ0, t0, unsignedp);
2238 convert_move (targ1, t1, unsignedp);
2239 return 1;
2241 else
2242 delete_insns_since (last);
2247 delete_insns_since (entry_last);
2248 return 0;
2251 /* Expand the two-valued library call indicated by BINOPTAB, but
2252 preserve only one of the values. If TARG0 is non-NULL, the first
2253 value is placed into TARG0; otherwise the second value is placed
2254 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2255 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2256 This routine assumes that the value returned by the library call is
2257 as if the return value was of an integral mode twice as wide as the
2258 mode of OP0. Returns 1 if the call was successful. */
2260 bool
2261 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2262 rtx targ0, rtx targ1, enum rtx_code code)
2264 enum machine_mode mode;
2265 enum machine_mode libval_mode;
2266 rtx libval;
2267 rtx insns;
2269 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2270 gcc_assert (!targ0 != !targ1);
2272 mode = GET_MODE (op0);
2273 if (!binoptab->handlers[(int) mode].libfunc)
2274 return false;
2276 /* The value returned by the library function will have twice as
2277 many bits as the nominal MODE. */
2278 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2279 MODE_INT);
2280 start_sequence ();
2281 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2282 NULL_RTX, LCT_CONST,
2283 libval_mode, 2,
2284 op0, mode,
2285 op1, mode);
2286 /* Get the part of VAL containing the value that we want. */
2287 libval = simplify_gen_subreg (mode, libval, libval_mode,
2288 targ0 ? 0 : GET_MODE_SIZE (mode));
2289 insns = get_insns ();
2290 end_sequence ();
2291 /* Move the result into the desired location. */
2292 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2293 gen_rtx_fmt_ee (code, mode, op0, op1));
2295 return true;
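/* Illustration (editor's sketch, not part of the original file): the
   library routine is assumed to return both values packed into an
   integer twice as wide as MODE, and the subreg above keeps only one
   half.  A C sketch with a hypothetical 32-bit divmod helper; the
   packing order shown is purely for illustration:  */
static unsigned long long
udivmod32_sketch (unsigned int num, unsigned int den)
{
  /* DEN must be nonzero.  */
  unsigned long long quot = num / den;
  unsigned long long rem = num % den;
  return quot | (rem << 32);    /* low half: quotient, high half: remainder */
}

static unsigned int
urem32_sketch (unsigned int num, unsigned int den)
{
  /* Keep only the half we want, as the subreg extraction does.  */
  return (unsigned int) (udivmod32_sketch (num, den) >> 32);
}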
2299 /* Wrapper around expand_unop which takes an rtx code to specify
2300 the operation to perform, not an optab pointer. All other
2301 arguments are the same. */
2303 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2304 rtx target, int unsignedp)
2306 optab unop = code_to_optab[(int) code];
2307 gcc_assert (unop);
2309 return expand_unop (mode, unop, op0, target, unsignedp);
2312 /* Try calculating
2313 (clz:narrow x)
2314 as
2315 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2316 static rtx
2317 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2319 enum mode_class class = GET_MODE_CLASS (mode);
2320 if (CLASS_HAS_WIDER_MODES_P (class))
2322 enum machine_mode wider_mode;
2323 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2324 wider_mode != VOIDmode;
2325 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2327 if (clz_optab->handlers[(int) wider_mode].insn_code
2328 != CODE_FOR_nothing)
2330 rtx xop0, temp, last;
2332 last = get_last_insn ();
2334 if (target == 0)
2335 target = gen_reg_rtx (mode);
2336 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2337 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2338 if (temp != 0)
2339 temp = expand_binop (wider_mode, sub_optab, temp,
2340 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2341 - GET_MODE_BITSIZE (mode)),
2342 target, true, OPTAB_DIRECT);
2343 if (temp == 0)
2344 delete_insns_since (last);
2346 return temp;
2350 return 0;
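/* Illustration (editor's sketch, not part of the original file): the
   widening clz transformation above in C, counting a 16-bit value in a
   32-bit mode: widen with zero extension, count, then subtract the
   difference in bit widths.  __builtin_clz is undefined for zero, so
   the zero case is left to the caller in this sketch.  */
static int
clz16_via_clz32_sketch (unsigned short x)
{
  return __builtin_clz ((unsigned int) x) - (32 - 16);
}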
2353 /* Try calculating (parity x) as (and (popcount x) 1), where
2354 popcount can also be done in a wider mode. */
2355 static rtx
2356 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2358 enum mode_class class = GET_MODE_CLASS (mode);
2359 if (CLASS_HAS_WIDER_MODES_P (class))
2361 enum machine_mode wider_mode;
2362 for (wider_mode = mode; wider_mode != VOIDmode;
2363 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2365 if (popcount_optab->handlers[(int) wider_mode].insn_code
2366 != CODE_FOR_nothing)
2368 rtx xop0, temp, last;
2370 last = get_last_insn ();
2372 if (target == 0)
2373 target = gen_reg_rtx (mode);
2374 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2375 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2376 true);
2377 if (temp != 0)
2378 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2379 target, true, OPTAB_DIRECT);
2380 if (temp == 0)
2381 delete_insns_since (last);
2383 return temp;
2387 return 0;
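/* Illustration (editor's sketch, not part of the original file): the
   reduction used by expand_parity, written in C.  */
static int
parity32_sketch (unsigned int x)
{
  return __builtin_popcount (x) & 1;    /* (and (popcount x) 1) */
}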
2390 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2391 conditions, VAL may already be a SUBREG against which we cannot generate
2392 a further SUBREG. In this case, we expect forcing the value into a
2393 register will work around the situation. */
2395 static rtx
2396 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2397 enum machine_mode imode)
2399 rtx ret;
2400 ret = lowpart_subreg (omode, val, imode);
2401 if (ret == NULL)
2403 val = force_reg (imode, val);
2404 ret = lowpart_subreg (omode, val, imode);
2405 gcc_assert (ret != NULL);
2407 return ret;
2410 /* Expand a floating point absolute value or negation operation via a
2411 logical operation on the sign bit. */
2413 static rtx
2414 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2415 rtx op0, rtx target)
2417 const struct real_format *fmt;
2418 int bitpos, word, nwords, i;
2419 enum machine_mode imode;
2420 HOST_WIDE_INT hi, lo;
2421 rtx temp, insns;
2423 /* The format has to have a simple sign bit. */
2424 fmt = REAL_MODE_FORMAT (mode);
2425 if (fmt == NULL)
2426 return NULL_RTX;
2428 bitpos = fmt->signbit_rw;
2429 if (bitpos < 0)
2430 return NULL_RTX;
2432 /* Don't create negative zeros if the format doesn't support them. */
2433 if (code == NEG && !fmt->has_signed_zero)
2434 return NULL_RTX;
2436 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2438 imode = int_mode_for_mode (mode);
2439 if (imode == BLKmode)
2440 return NULL_RTX;
2441 word = 0;
2442 nwords = 1;
2444 else
2446 imode = word_mode;
2448 if (FLOAT_WORDS_BIG_ENDIAN)
2449 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2450 else
2451 word = bitpos / BITS_PER_WORD;
2452 bitpos = bitpos % BITS_PER_WORD;
2453 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2456 if (bitpos < HOST_BITS_PER_WIDE_INT)
2458 hi = 0;
2459 lo = (HOST_WIDE_INT) 1 << bitpos;
2461 else
2463 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2464 lo = 0;
2466 if (code == ABS)
2467 lo = ~lo, hi = ~hi;
2469 if (target == 0 || target == op0)
2470 target = gen_reg_rtx (mode);
2472 if (nwords > 1)
2474 start_sequence ();
2476 for (i = 0; i < nwords; ++i)
2478 rtx targ_piece = operand_subword (target, i, 1, mode);
2479 rtx op0_piece = operand_subword_force (op0, i, mode);
2481 if (i == word)
2483 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2484 op0_piece,
2485 immed_double_const (lo, hi, imode),
2486 targ_piece, 1, OPTAB_LIB_WIDEN);
2487 if (temp != targ_piece)
2488 emit_move_insn (targ_piece, temp);
2490 else
2491 emit_move_insn (targ_piece, op0_piece);
2494 insns = get_insns ();
2495 end_sequence ();
2497 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2498 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2500 else
2502 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2503 gen_lowpart (imode, op0),
2504 immed_double_const (lo, hi, imode),
2505 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2506 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2508 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2509 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2512 return target;
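/* Illustration (editor's sketch, not part of the original file): the
   sign-bit trick used by expand_absneg_bit, in C for an IEEE single.
   The sketch assumes a 32-bit float with the sign in bit 31; the real
   code takes the bit position from REAL_MODE_FORMAT and also handles
   multi-word modes.  Type punning through a union is used here only
   for illustration.  */
static float
fneg_bit_sketch (float x)
{
  union { float f; unsigned int i; } u;
  u.f = x;
  u.i ^= 0x80000000u;           /* NEG: flip the sign bit */
  return u.f;
}

static float
fabs_bit_sketch (float x)
{
  union { float f; unsigned int i; } u;
  u.f = x;
  u.i &= ~0x80000000u;          /* ABS: clear the sign bit */
  return u.f;
}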
2515 /* Generate code to perform an operation specified by UNOPTAB
2516 on operand OP0, with result having machine-mode MODE.
2518 UNSIGNEDP is for the case where we have to widen the operands
2519 to perform the operation. It says to use zero-extension.
2521 If TARGET is nonzero, the value
2522 is generated there, if it is convenient to do so.
2523 In all cases an rtx is returned for the locus of the value;
2524 this may or may not be TARGET. */
2527 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2528 int unsignedp)
2530 enum mode_class class;
2531 enum machine_mode wider_mode;
2532 rtx temp;
2533 rtx last = get_last_insn ();
2534 rtx pat;
2536 class = GET_MODE_CLASS (mode);
2538 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2540 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2541 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2542 rtx xop0 = op0;
2544 if (target)
2545 temp = target;
2546 else
2547 temp = gen_reg_rtx (mode);
2549 if (GET_MODE (xop0) != VOIDmode
2550 && GET_MODE (xop0) != mode0)
2551 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2553 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2555 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2556 xop0 = copy_to_mode_reg (mode0, xop0);
2558 if (!insn_data[icode].operand[0].predicate (temp, mode))
2559 temp = gen_reg_rtx (mode);
2561 pat = GEN_FCN (icode) (temp, xop0);
2562 if (pat)
2564 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2565 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2567 delete_insns_since (last);
2568 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2571 emit_insn (pat);
2573 return temp;
2575 else
2576 delete_insns_since (last);
2579 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2581 /* Widening clz needs special treatment. */
2582 if (unoptab == clz_optab)
2584 temp = widen_clz (mode, op0, target);
2585 if (temp)
2586 return temp;
2587 else
2588 goto try_libcall;
2591 if (CLASS_HAS_WIDER_MODES_P (class))
2592 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2593 wider_mode != VOIDmode;
2594 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2596 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2598 rtx xop0 = op0;
2600 /* For certain operations, we need not actually extend
2601 the narrow operand, as long as we will truncate the
2602 results to the same narrowness. */
2604 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2605 (unoptab == neg_optab
2606 || unoptab == one_cmpl_optab)
2607 && class == MODE_INT);
2609 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2610 unsignedp);
2612 if (temp)
2614 if (class != MODE_INT
2615 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2616 GET_MODE_BITSIZE (wider_mode)))
2618 if (target == 0)
2619 target = gen_reg_rtx (mode);
2620 convert_move (target, temp, 0);
2621 return target;
2623 else
2624 return gen_lowpart (mode, temp);
2626 else
2627 delete_insns_since (last);
2631 /* These can be done a word at a time. */
2632 if (unoptab == one_cmpl_optab
2633 && class == MODE_INT
2634 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2635 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2637 int i;
2638 rtx insns;
2640 if (target == 0 || target == op0)
2641 target = gen_reg_rtx (mode);
2643 start_sequence ();
2645 /* Do the actual arithmetic. */
2646 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2648 rtx target_piece = operand_subword (target, i, 1, mode);
2649 rtx x = expand_unop (word_mode, unoptab,
2650 operand_subword_force (op0, i, mode),
2651 target_piece, unsignedp);
2653 if (target_piece != x)
2654 emit_move_insn (target_piece, x);
2657 insns = get_insns ();
2658 end_sequence ();
2660 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2661 gen_rtx_fmt_e (unoptab->code, mode,
2662 copy_rtx (op0)));
2663 return target;
2666 if (unoptab->code == NEG)
2668 /* Try negating floating point values by flipping the sign bit. */
2669 if (SCALAR_FLOAT_MODE_P (mode))
2671 temp = expand_absneg_bit (NEG, mode, op0, target);
2672 if (temp)
2673 return temp;
2676 /* If there is no negation pattern, and we have no negative zero,
2677 try subtracting from zero. */
2678 if (!HONOR_SIGNED_ZEROS (mode))
2680 temp = expand_binop (mode, (unoptab == negv_optab
2681 ? subv_optab : sub_optab),
2682 CONST0_RTX (mode), op0, target,
2683 unsignedp, OPTAB_DIRECT);
2684 if (temp)
2685 return temp;
2689 /* Try calculating parity (x) as popcount (x) % 2. */
2690 if (unoptab == parity_optab)
2692 temp = expand_parity (mode, op0, target);
2693 if (temp)
2694 return temp;
2697 try_libcall:
2698 /* Now try a library call in this mode. */
2699 if (unoptab->handlers[(int) mode].libfunc)
2701 rtx insns;
2702 rtx value;
2703 enum machine_mode outmode = mode;
2705 /* All of these functions return small values. Thus we choose to
2706 have them return something that isn't a double-word. */
2707 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2708 || unoptab == popcount_optab || unoptab == parity_optab)
2709 outmode
2710 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2712 start_sequence ();
2714 /* Pass 1 for NO_QUEUE so we don't lose any increments
2715 if the libcall is cse'd or moved. */
2716 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2717 NULL_RTX, LCT_CONST, outmode,
2718 1, op0, mode);
2719 insns = get_insns ();
2720 end_sequence ();
2722 target = gen_reg_rtx (outmode);
2723 emit_libcall_block (insns, target, value,
2724 gen_rtx_fmt_e (unoptab->code, outmode, op0));
2726 return target;
2729 /* It can't be done in this mode. Can we do it in a wider mode? */
2731 if (CLASS_HAS_WIDER_MODES_P (class))
2733 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2734 wider_mode != VOIDmode;
2735 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2737 if ((unoptab->handlers[(int) wider_mode].insn_code
2738 != CODE_FOR_nothing)
2739 || unoptab->handlers[(int) wider_mode].libfunc)
2741 rtx xop0 = op0;
2743 /* For certain operations, we need not actually extend
2744 the narrow operand, as long as we will truncate the
2745 results to the same narrowness. */
2747 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2748 (unoptab == neg_optab
2749 || unoptab == one_cmpl_optab)
2750 && class == MODE_INT);
2752 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2753 unsignedp);
2755 /* If we are generating clz using a wider mode, adjust the
2756 result. */
2757 if (unoptab == clz_optab && temp != 0)
2758 temp = expand_binop (wider_mode, sub_optab, temp,
2759 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2760 - GET_MODE_BITSIZE (mode)),
2761 target, true, OPTAB_DIRECT);
2763 if (temp)
2765 if (class != MODE_INT)
2767 if (target == 0)
2768 target = gen_reg_rtx (mode);
2769 convert_move (target, temp, 0);
2770 return target;
2772 else
2773 return gen_lowpart (mode, temp);
2775 else
2776 delete_insns_since (last);
2781 /* One final attempt at implementing negation via subtraction,
2782 this time allowing widening of the operand. */
2783 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2785 rtx temp;
2786 temp = expand_binop (mode,
2787 unoptab == negv_optab ? subv_optab : sub_optab,
2788 CONST0_RTX (mode), op0,
2789 target, unsignedp, OPTAB_LIB_WIDEN);
2790 if (temp)
2791 return temp;
2794 return 0;
2797 /* Emit code to compute the absolute value of OP0, with result to
2798 TARGET if convenient. (TARGET may be 0.) The return value says
2799 where the result actually is to be found.
2801 MODE is the mode of the operand; the result is produced in the
2802 same mode. */
2807 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2808 int result_unsignedp)
2810 rtx temp;
2812 if (! flag_trapv)
2813 result_unsignedp = 1;
2815 /* First try to do it with a special abs instruction. */
2816 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2817 op0, target, 0);
2818 if (temp != 0)
2819 return temp;
2821 /* For floating point modes, try clearing the sign bit. */
2822 if (SCALAR_FLOAT_MODE_P (mode))
2824 temp = expand_absneg_bit (ABS, mode, op0, target);
2825 if (temp)
2826 return temp;
2829 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2830 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2831 && !HONOR_SIGNED_ZEROS (mode))
2833 rtx last = get_last_insn ();
2835 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2836 if (temp != 0)
2837 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2838 OPTAB_WIDEN);
2840 if (temp != 0)
2841 return temp;
2843 delete_insns_since (last);
2846 /* If this machine has expensive jumps, we can do integer absolute
2847 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2848 where W is the width of MODE. */
2850 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2852 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2853 size_int (GET_MODE_BITSIZE (mode) - 1),
2854 NULL_RTX, 0);
2856 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2857 OPTAB_LIB_WIDEN);
2858 if (temp != 0)
2859 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2860 temp, extended, target, 0, OPTAB_LIB_WIDEN);
2862 if (temp != 0)
2863 return temp;
2866 return NULL_RTX;
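/* Illustration (editor's sketch, not part of the original file): the
   branch-free absolute value used above when jumps are expensive,
   written in C.  The sketch assumes a 32-bit int and an arithmetic
   right shift of negative values, which is implementation-defined in
   ISO C but is what the signed RTL shift provides.  */
static int
iabs_branchless_sketch (int x)
{
  int s = x >> 31;              /* 0 for x >= 0, -1 for x < 0 */
  return (x ^ s) - s;           /* ((x >> (W-1)) ^ x) - (x >> (W-1)) */
}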
2870 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2871 int result_unsignedp, int safe)
2873 rtx temp, op1;
2875 if (! flag_trapv)
2876 result_unsignedp = 1;
2878 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2879 if (temp != 0)
2880 return temp;
2882 /* If that does not win, use conditional jump and negate. */
2884 /* It is safe to use the target if it is the same
2885 as the source and is also a pseudo register. */
2886 if (op0 == target && REG_P (op0)
2887 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
2888 safe = 1;
2890 op1 = gen_label_rtx ();
2891 if (target == 0 || ! safe
2892 || GET_MODE (target) != mode
2893 || (MEM_P (target) && MEM_VOLATILE_P (target))
2894 || (REG_P (target)
2895 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2896 target = gen_reg_rtx (mode);
2898 emit_move_insn (target, op0);
2899 NO_DEFER_POP;
2901 /* If this mode is an integer too wide to compare properly,
2902 compare word by word. Rely on CSE to optimize constant cases. */
2903 if (GET_MODE_CLASS (mode) == MODE_INT
2904 && ! can_compare_p (GE, mode, ccp_jump))
2905 do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
2906 NULL_RTX, op1);
2907 else
2908 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2909 NULL_RTX, NULL_RTX, op1);
2911 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2912 target, target, 0);
2913 if (op0 != target)
2914 emit_move_insn (target, op0);
2915 emit_label (op1);
2916 OK_DEFER_POP;
2917 return target;
2920 /* A subroutine of expand_copysign, perform the copysign operation using the
2921 abs and neg primitives advertised to exist on the target. The assumption
2922 is that we have a split register file, and leaving op0 in fp registers,
2923 and not playing with subregs so much, will help the register allocator. */
2925 static rtx
2926 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2927 int bitpos, bool op0_is_abs)
2929 enum machine_mode imode;
2930 HOST_WIDE_INT hi, lo;
2931 int word;
2932 rtx label;
2934 if (target == op1)
2935 target = NULL_RTX;
2937 if (!op0_is_abs)
2939 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2940 if (op0 == NULL)
2941 return NULL_RTX;
2942 target = op0;
2944 else
2946 if (target == NULL_RTX)
2947 target = copy_to_reg (op0);
2948 else
2949 emit_move_insn (target, op0);
2952 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2954 imode = int_mode_for_mode (mode);
2955 if (imode == BLKmode)
2956 return NULL_RTX;
2957 op1 = gen_lowpart (imode, op1);
2959 else
2961 imode = word_mode;
2962 if (FLOAT_WORDS_BIG_ENDIAN)
2963 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2964 else
2965 word = bitpos / BITS_PER_WORD;
2966 bitpos = bitpos % BITS_PER_WORD;
2967 op1 = operand_subword_force (op1, word, mode);
2970 if (bitpos < HOST_BITS_PER_WIDE_INT)
2972 hi = 0;
2973 lo = (HOST_WIDE_INT) 1 << bitpos;
2975 else
2977 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2978 lo = 0;
2981 op1 = expand_binop (imode, and_optab, op1,
2982 immed_double_const (lo, hi, imode),
2983 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2985 label = gen_label_rtx ();
2986 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
2988 if (GET_CODE (op0) == CONST_DOUBLE)
2989 op0 = simplify_unary_operation (NEG, mode, op0, mode);
2990 else
2991 op0 = expand_unop (mode, neg_optab, op0, target, 0);
2992 if (op0 != target)
2993 emit_move_insn (target, op0);
2995 emit_label (label);
2997 return target;
3001 /* A subroutine of expand_copysign, perform the entire copysign operation
3002 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3003 is true if op0 is known to have its sign bit clear. */
3005 static rtx
3006 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3007 int bitpos, bool op0_is_abs)
3009 enum machine_mode imode;
3010 HOST_WIDE_INT hi, lo;
3011 int word, nwords, i;
3012 rtx temp, insns;
3014 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3016 imode = int_mode_for_mode (mode);
3017 if (imode == BLKmode)
3018 return NULL_RTX;
3019 word = 0;
3020 nwords = 1;
3022 else
3024 imode = word_mode;
3026 if (FLOAT_WORDS_BIG_ENDIAN)
3027 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3028 else
3029 word = bitpos / BITS_PER_WORD;
3030 bitpos = bitpos % BITS_PER_WORD;
3031 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3034 if (bitpos < HOST_BITS_PER_WIDE_INT)
3036 hi = 0;
3037 lo = (HOST_WIDE_INT) 1 << bitpos;
3039 else
3041 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3042 lo = 0;
3045 if (target == 0 || target == op0 || target == op1)
3046 target = gen_reg_rtx (mode);
3048 if (nwords > 1)
3050 start_sequence ();
3052 for (i = 0; i < nwords; ++i)
3054 rtx targ_piece = operand_subword (target, i, 1, mode);
3055 rtx op0_piece = operand_subword_force (op0, i, mode);
3057 if (i == word)
3059 if (!op0_is_abs)
3060 op0_piece = expand_binop (imode, and_optab, op0_piece,
3061 immed_double_const (~lo, ~hi, imode),
3062 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3064 op1 = expand_binop (imode, and_optab,
3065 operand_subword_force (op1, i, mode),
3066 immed_double_const (lo, hi, imode),
3067 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3069 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3070 targ_piece, 1, OPTAB_LIB_WIDEN);
3071 if (temp != targ_piece)
3072 emit_move_insn (targ_piece, temp);
3074 else
3075 emit_move_insn (targ_piece, op0_piece);
3078 insns = get_insns ();
3079 end_sequence ();
3081 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
3083 else
3085 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3086 immed_double_const (lo, hi, imode),
3087 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3089 op0 = gen_lowpart (imode, op0);
3090 if (!op0_is_abs)
3091 op0 = expand_binop (imode, and_optab, op0,
3092 immed_double_const (~lo, ~hi, imode),
3093 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3095 temp = expand_binop (imode, ior_optab, op0, op1,
3096 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3097 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3100 return target;
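/* Illustration (editor's sketch, not part of the original file): the mask
   sequence built by expand_copysign_bit, in C for an IEEE single with the
   sign in bit 31 (an assumption of this sketch; the real code reads
   signbit_rw from the format and may work word by word).  */
static float
copysign_bit_sketch (float x, float y)
{
  union { float f; unsigned int i; } ux, uy;
  ux.f = x;
  uy.f = y;
  /* Clear the sign of X (skipped when op0_is_abs), isolate the sign of Y,
     and IOR the two together.  */
  ux.i = (ux.i & ~0x80000000u) | (uy.i & 0x80000000u);
  return ux.f;
}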
3103 /* Expand the C99 copysign operation. OP0 and OP1 must both have the
3104 same scalar floating point mode. Return NULL if we do not know how to
3105 expand the operation inline. */
3108 expand_copysign (rtx op0, rtx op1, rtx target)
3110 enum machine_mode mode = GET_MODE (op0);
3111 const struct real_format *fmt;
3112 bool op0_is_abs;
3113 rtx temp;
3115 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3116 gcc_assert (GET_MODE (op1) == mode);
3118 /* First try to do it with a special instruction. */
3119 temp = expand_binop (mode, copysign_optab, op0, op1,
3120 target, 0, OPTAB_DIRECT);
3121 if (temp)
3122 return temp;
3124 fmt = REAL_MODE_FORMAT (mode);
3125 if (fmt == NULL || !fmt->has_signed_zero)
3126 return NULL_RTX;
3128 op0_is_abs = false;
3129 if (GET_CODE (op0) == CONST_DOUBLE)
3131 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3132 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3133 op0_is_abs = true;
3136 if (fmt->signbit_ro >= 0
3137 && (GET_CODE (op0) == CONST_DOUBLE
3138 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
3139 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
3141 temp = expand_copysign_absneg (mode, op0, op1, target,
3142 fmt->signbit_ro, op0_is_abs);
3143 if (temp)
3144 return temp;
3147 if (fmt->signbit_rw < 0)
3148 return NULL_RTX;
3149 return expand_copysign_bit (mode, op0, op1, target,
3150 fmt->signbit_rw, op0_is_abs);
3153 /* Generate an instruction whose insn-code is INSN_CODE,
3154 with two operands: an output TARGET and an input OP0.
3155 TARGET *must* be nonzero, and the output is always stored there.
3156 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3157 the value that is stored into TARGET. */
3159 void
3160 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3162 rtx temp;
3163 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3164 rtx pat;
3166 temp = target;
3168 /* Now, if insn does not accept our operands, put them into pseudos. */
3170 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3171 op0 = copy_to_mode_reg (mode0, op0);
3173 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3174 temp = gen_reg_rtx (GET_MODE (temp));
3176 pat = GEN_FCN (icode) (temp, op0);
3178 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3179 add_equal_note (pat, temp, code, op0, NULL_RTX);
3181 emit_insn (pat);
3183 if (temp != target)
3184 emit_move_insn (target, temp);
3187 struct no_conflict_data
3189 rtx target, first, insn;
3190 bool must_stay;
3193 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3194 Set P->must_stay if the currently examined clobber / store has to stay
3195 in the list of insns that constitute the actual no_conflict block /
3196 libcall block. */
3197 static void
3198 no_conflict_move_test (rtx dest, rtx set, void *p0)
3200 struct no_conflict_data *p = p0;
3202 /* If this insn directly contributes to setting the target, it must stay. */
3203 if (reg_overlap_mentioned_p (p->target, dest))
3204 p->must_stay = true;
3205 /* If we haven't committed to keeping any other insns in the list yet,
3206 there is nothing more to check. */
3207 else if (p->insn == p->first)
3208 return;
3209 /* If this insn sets / clobbers a register that feeds one of the insns
3210 already in the list, this insn has to stay too. */
3211 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3212 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3213 || reg_used_between_p (dest, p->first, p->insn)
3214 /* Likewise if this insn depends on a register set by a previous
3215 insn in the list, or if it sets a result (presumably a hard
3216 register) that is set or clobbered by a previous insn.
3217 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3218 SET_DEST perform the former check on the address, and the latter
3219 check on the MEM. */
3220 || (GET_CODE (set) == SET
3221 && (modified_in_p (SET_SRC (set), p->first)
3222 || modified_in_p (SET_DEST (set), p->first)
3223 || modified_between_p (SET_SRC (set), p->first, p->insn)
3224 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3225 p->must_stay = true;
3228 /* Emit code to perform a series of operations on a multi-word quantity, one
3229 word at a time.
3231 Such a block is preceded by a CLOBBER of the output, consists of multiple
3232 insns, each setting one word of the output, and followed by a SET copying
3233 the output to itself.
3235 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3236 note indicating that it doesn't conflict with the (also multi-word)
3237 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3238 notes.
3240 INSNS is a block of code generated to perform the operation, not including
3241 the CLOBBER and final copy. All insns that compute intermediate values
3242 are first emitted, followed by the block as described above.
3244 TARGET, OP0, and OP1 are the output and inputs of the operations,
3245 respectively. OP1 may be zero for a unary operation.
3247 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3248 on the last insn.
3250 If TARGET is not a register, INSNS is simply emitted with no special
3251 processing. Likewise if anything in INSNS is not an INSN or if
3252 there is a libcall block inside INSNS.
3254 The final insn emitted is returned. */
3257 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3259 rtx prev, next, first, last, insn;
3261 if (!REG_P (target) || reload_in_progress)
3262 return emit_insn (insns);
3263 else
3264 for (insn = insns; insn; insn = NEXT_INSN (insn))
3265 if (!NONJUMP_INSN_P (insn)
3266 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3267 return emit_insn (insns);
3269 /* First emit all insns that do not store into words of the output and remove
3270 these from the list. */
3271 for (insn = insns; insn; insn = next)
3273 rtx note;
3274 struct no_conflict_data data;
3276 next = NEXT_INSN (insn);
3278 /* Some ports (cris) create libcall regions of their own. We must
3279 avoid any potential nesting of LIBCALLs. */
3280 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3281 remove_note (insn, note);
3282 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3283 remove_note (insn, note);
3285 data.target = target;
3286 data.first = insns;
3287 data.insn = insn;
3288 data.must_stay = 0;
3289 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3290 if (! data.must_stay)
3292 if (PREV_INSN (insn))
3293 NEXT_INSN (PREV_INSN (insn)) = next;
3294 else
3295 insns = next;
3297 if (next)
3298 PREV_INSN (next) = PREV_INSN (insn);
3300 add_insn (insn);
3304 prev = get_last_insn ();
3306 /* Now write the CLOBBER of the output, followed by the setting of each
3307 of the words, followed by the final copy. */
3308 if (target != op0 && target != op1)
3309 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3311 for (insn = insns; insn; insn = next)
3313 next = NEXT_INSN (insn);
3314 add_insn (insn);
3316 if (op1 && REG_P (op1))
3317 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3318 REG_NOTES (insn));
3320 if (op0 && REG_P (op0))
3321 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3322 REG_NOTES (insn));
3325 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3326 != CODE_FOR_nothing)
3328 last = emit_move_insn (target, target);
3329 if (equiv)
3330 set_unique_reg_note (last, REG_EQUAL, equiv);
3332 else
3334 last = get_last_insn ();
3336 /* Remove any existing REG_EQUAL note from "last", or else it will
3337 be mistaken for a note referring to the full contents of the
3338 alleged libcall value when found together with the REG_RETVAL
3339 note added below. An existing note can come from an insn
3340 expansion at "last". */
3341 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3344 if (prev == 0)
3345 first = get_insns ();
3346 else
3347 first = NEXT_INSN (prev);
3349 /* Encapsulate the block so it gets manipulated as a unit. */
3350 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3351 REG_NOTES (first));
3352 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
3354 return last;
3357 /* Emit code to make a call to a constant function or a library call.
3359 INSNS is a list containing all insns emitted in the call.
3360 These insns leave the result in RESULT. Our job is to copy RESULT
3361 to TARGET, which is logically equivalent to EQUIV.
3363 We first emit any insns that set a pseudo on the assumption that these are
3364 loading constants into registers; doing so allows them to be safely cse'ed
3365 between blocks. Then we emit all the other insns in the block, followed by
3366 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3367 note with an operand of EQUIV.
3369 Moving assignments to pseudos outside of the block is done to improve
3370 the generated code, but is not required to generate correct code,
3371 hence being unable to move an assignment is not grounds for not making
3372 a libcall block. There are two reasons why it is safe to leave these
3373 insns inside the block: First, we know that these pseudos cannot be
3374 used in generated RTL outside the block since they are created for
3375 temporary purposes within the block. Second, CSE will not record the
3376 values of anything set inside a libcall block, so we know they must
3377 be dead at the end of the block.
3379 Except for the first group of insns (the ones setting pseudos), the
3380 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3382 void
3383 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3385 rtx final_dest = target;
3386 rtx prev, next, first, last, insn;
3388 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3389 into a MEM later. Protect the libcall block from this change. */
3390 if (! REG_P (target) || REG_USERVAR_P (target))
3391 target = gen_reg_rtx (GET_MODE (target));
3393 /* If we're using non-call exceptions, a libcall corresponding to an
3394 operation that may trap may also trap. */
3395 if (flag_non_call_exceptions && may_trap_p (equiv))
3397 for (insn = insns; insn; insn = NEXT_INSN (insn))
3398 if (CALL_P (insn))
3400 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3402 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3403 remove_note (insn, note);
3406 else
3407 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3408 reg note to indicate that this call cannot throw or execute a nonlocal
3409 goto (unless there is already a REG_EH_REGION note, in which case
3410 we update it). */
3411 for (insn = insns; insn; insn = NEXT_INSN (insn))
3412 if (CALL_P (insn))
3414 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3416 if (note != 0)
3417 XEXP (note, 0) = constm1_rtx;
3418 else
3419 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3420 REG_NOTES (insn));
3423 /* First emit all insns that set pseudos. Remove them from the list as
3424 we go. Avoid insns that set pseudos which were referenced in previous
3425 insns. These can be generated by move_by_pieces, for example,
3426 to update an address. Similarly, avoid insns that reference things
3427 set in previous insns. */
3429 for (insn = insns; insn; insn = next)
3431 rtx set = single_set (insn);
3432 rtx note;
3434 /* Some ports (cris) create libcall regions of their own. We must
3435 avoid any potential nesting of LIBCALLs. */
3436 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3437 remove_note (insn, note);
3438 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3439 remove_note (insn, note);
3441 next = NEXT_INSN (insn);
3443 if (set != 0 && REG_P (SET_DEST (set))
3444 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3446 struct no_conflict_data data;
3448 data.target = const0_rtx;
3449 data.first = insns;
3450 data.insn = insn;
3451 data.must_stay = 0;
3452 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3453 if (! data.must_stay)
3455 if (PREV_INSN (insn))
3456 NEXT_INSN (PREV_INSN (insn)) = next;
3457 else
3458 insns = next;
3460 if (next)
3461 PREV_INSN (next) = PREV_INSN (insn);
3463 add_insn (insn);
3467 /* Some ports use a loop to copy large arguments onto the stack.
3468 Don't move anything outside such a loop. */
3469 if (LABEL_P (insn))
3470 break;
3473 prev = get_last_insn ();
3475 /* Write the remaining insns followed by the final copy. */
3477 for (insn = insns; insn; insn = next)
3479 next = NEXT_INSN (insn);
3481 add_insn (insn);
3484 last = emit_move_insn (target, result);
3485 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3486 != CODE_FOR_nothing)
3487 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3488 else
3490 /* Remove any existing REG_EQUAL note from "last", or else it will
3491 be mistaken for a note referring to the full contents of the
3492 libcall value when found together with the REG_RETVAL note added
3493 below. An existing note can come from an insn expansion at
3494 "last". */
3495 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3498 if (final_dest != target)
3499 emit_move_insn (final_dest, target);
3501 if (prev == 0)
3502 first = get_insns ();
3503 else
3504 first = NEXT_INSN (prev);
3506 /* Encapsulate the block so it gets manipulated as a unit. */
3507 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3509 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3510 when the encapsulated region would not be in one basic block,
3511 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3512 */
3513 bool attach_libcall_retval_notes = true;
3514 next = NEXT_INSN (last);
3515 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3516 if (control_flow_insn_p (insn))
3518 attach_libcall_retval_notes = false;
3519 break;
3522 if (attach_libcall_retval_notes)
3524 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3525 REG_NOTES (first));
3526 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3527 REG_NOTES (last));
3532 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3533 PURPOSE describes how this comparison will be used. CODE is the rtx
3534 comparison code we will be using.
3536 ??? Actually, CODE is slightly weaker than that. A target is still
3537 required to implement all of the normal bcc operations, but not
3538 required to implement all (or any) of the unordered bcc operations. */
3541 can_compare_p (enum rtx_code code, enum machine_mode mode,
3542 enum can_compare_purpose purpose)
3546 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3548 if (purpose == ccp_jump)
3549 return bcc_gen_fctn[(int) code] != NULL;
3550 else if (purpose == ccp_store_flag)
3551 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3552 else
3553 /* There's only one cmov entry point, and it's allowed to fail. */
3554 return 1;
3556 if (purpose == ccp_jump
3557 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3558 return 1;
3559 if (purpose == ccp_cmov
3560 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3561 return 1;
3562 if (purpose == ccp_store_flag
3563 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3564 return 1;
3565 mode = GET_MODE_WIDER_MODE (mode);
3567 while (mode != VOIDmode);
3569 return 0;
3572 /* This function is called when we are going to emit a compare instruction that
3573 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3575 *PMODE is the mode of the inputs (in case they are const_int).
3576 *PUNSIGNEDP nonzero says that the operands are unsigned;
3577 this matters if they need to be widened.
3579 If they have mode BLKmode, then SIZE specifies the size of both operands.
3581 This function performs all the setup necessary so that the caller only has
3582 to emit a single comparison insn. This setup can involve doing a BLKmode
3583 comparison or emitting a library call to perform the comparison if no insn
3584 is available to handle it.
3585 The values which are passed in through pointers can be modified; the caller
3586 should perform the comparison on the modified values. Constant
3587 comparisons must have already been folded. */
3589 static void
3590 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3591 enum machine_mode *pmode, int *punsignedp,
3592 enum can_compare_purpose purpose)
3594 enum machine_mode mode = *pmode;
3595 rtx x = *px, y = *py;
3596 int unsignedp = *punsignedp;
3598 /* If we are inside an appropriately-short loop and we are optimizing,
3599 force expensive constants into a register. */
3600 if (CONSTANT_P (x) && optimize
3601 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3602 x = force_reg (mode, x);
3604 if (CONSTANT_P (y) && optimize
3605 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3606 y = force_reg (mode, y);
3608 #ifdef HAVE_cc0
3609 /* Make sure we have a canonical comparison. The RTL
3610 documentation states that canonical comparisons are required only
3611 for targets which have cc0. */
3612 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3613 #endif
3615 /* Don't let both operands fail to indicate the mode. */
3616 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3617 x = force_reg (mode, x);
3619 /* Handle all BLKmode compares. */
3621 if (mode == BLKmode)
3623 enum machine_mode cmp_mode, result_mode;
3624 enum insn_code cmp_code;
3625 tree length_type;
3626 rtx libfunc;
3627 rtx result;
3628 rtx opalign
3629 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3631 gcc_assert (size);
3633 /* Try to use a memory block compare insn - cmpmem, cmpstr
3634 or cmpstrn will do. */
3635 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3636 cmp_mode != VOIDmode;
3637 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3639 cmp_code = cmpmem_optab[cmp_mode];
3640 if (cmp_code == CODE_FOR_nothing)
3641 cmp_code = cmpstr_optab[cmp_mode];
3642 if (cmp_code == CODE_FOR_nothing)
3643 cmp_code = cmpstrn_optab[cmp_mode];
3644 if (cmp_code == CODE_FOR_nothing)
3645 continue;
3647 /* Must make sure the size fits the insn's mode. */
3648 if ((GET_CODE (size) == CONST_INT
3649 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3650 || (GET_MODE_BITSIZE (GET_MODE (size))
3651 > GET_MODE_BITSIZE (cmp_mode)))
3652 continue;
3654 result_mode = insn_data[cmp_code].operand[0].mode;
3655 result = gen_reg_rtx (result_mode);
3656 size = convert_to_mode (cmp_mode, size, 1);
3657 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3659 *px = result;
3660 *py = const0_rtx;
3661 *pmode = result_mode;
3662 return;
3665 /* Otherwise call a library function, memcmp. */
3666 libfunc = memcmp_libfunc;
3667 length_type = sizetype;
3668 result_mode = TYPE_MODE (integer_type_node);
3669 cmp_mode = TYPE_MODE (length_type);
3670 size = convert_to_mode (TYPE_MODE (length_type), size,
3671 TYPE_UNSIGNED (length_type));
3673 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3674 result_mode, 3,
3675 XEXP (x, 0), Pmode,
3676 XEXP (y, 0), Pmode,
3677 size, cmp_mode);
3678 *px = result;
3679 *py = const0_rtx;
3680 *pmode = result_mode;
3681 return;
3684 /* Don't allow operands to the compare to trap, as that can put the
3685 compare and branch in different basic blocks. */
3686 if (flag_non_call_exceptions)
3688 if (may_trap_p (x))
3689 x = force_reg (mode, x);
3690 if (may_trap_p (y))
3691 y = force_reg (mode, y);
3694 *px = x;
3695 *py = y;
3696 if (can_compare_p (*pcomparison, mode, purpose))
3697 return;
3699 /* Handle a lib call just for the mode we are using. */
3701 if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
3703 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3704 rtx result;
3706 /* If we want unsigned, and this mode has a distinct unsigned
3707 comparison routine, use that. */
3708 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3709 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3711 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3712 word_mode, 2, x, mode, y, mode);
3714 *px = result;
3715 *pmode = word_mode;
3716 if (TARGET_LIB_INT_CMP_BIASED)
3717 /* Integer comparison returns a result that must be compared
3718 against 1, so that even if we do an unsigned compare
3719 afterward, there is still a value that can represent the
3720 result "less than". */
3721 *py = const1_rtx;
3722 else
3724 *py = const0_rtx;
3725 *punsignedp = 1;
3727 return;
3730 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3731 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
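/* As an illustration of the integer libcall fallback above: on a 32-bit
   target the DImode entry of cmp_optab is, by default, libgcc's __cmpdi2
   (ucmp_optab holds __ucmpdi2), which returns 0, 1 or 2 according to
   whether its first operand is less than, equal to or greater than the
   second.  A test such as "a < b" therefore comes back to the caller as,
   roughly,

     t = __cmpdi2 (a, b);
     if (t < 1) ...

   which is why *py is set to const1_rtx when TARGET_LIB_INT_CMP_BIASED
   holds; unbiased configurations compare the result against zero instead.  */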
3734 /* Before emitting an insn with code ICODE, make sure that X, which is going
3735 to be used for operand OPNUM of the insn, is converted from mode MODE to
3736 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3737 that it is accepted by the operand predicate. Return the new value. */
3739 static rtx
3740 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3741 enum machine_mode wider_mode, int unsignedp)
3743 if (mode != wider_mode)
3744 x = convert_modes (wider_mode, mode, x, unsignedp);
3746 if (!insn_data[icode].operand[opnum].predicate
3747 (x, insn_data[icode].operand[opnum].mode))
3749 if (no_new_pseudos)
3750 return NULL_RTX;
3751 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3754 return x;
3757 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3758 we can do the comparison.
3759 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3760 be NULL_RTX which indicates that only a comparison is to be generated. */
3762 static void
3763 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3764 enum rtx_code comparison, int unsignedp, rtx label)
3766 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3767 enum mode_class class = GET_MODE_CLASS (mode);
3768 enum machine_mode wider_mode = mode;
3770 /* Try combined insns first. */
3773 enum insn_code icode;
3774 PUT_MODE (test, wider_mode);
3776 if (label)
3778 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3780 if (icode != CODE_FOR_nothing
3781 && insn_data[icode].operand[0].predicate (test, wider_mode))
3783 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3784 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3785 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3786 return;
3790 /* Handle some compares against zero. */
3791 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3792 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3794 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3795 emit_insn (GEN_FCN (icode) (x));
3796 if (label)
3797 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3798 return;
3801 /* Handle compares for which there is a directly suitable insn. */
3803 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3804 if (icode != CODE_FOR_nothing)
3806 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3807 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3808 emit_insn (GEN_FCN (icode) (x, y));
3809 if (label)
3810 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3811 return;
3814 if (!CLASS_HAS_WIDER_MODES_P (class))
3815 break;
3817 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3819 while (wider_mode != VOIDmode);
3821 gcc_unreachable ();
3824 /* Generate code to compare X with Y so that the condition codes are
3825 set and to jump to LABEL if the condition is true. If X is a
3826 constant and Y is not a constant, then the comparison is swapped to
3827 ensure that the comparison RTL has the canonical form.
3829 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3830 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3831 the proper branch condition code.
3833 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3835 MODE is the mode of the inputs (in case they are const_int).
3837 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3838 be passed unchanged to emit_cmp_insn, then potentially converted into an
3839 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3841 void
3842 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3843 enum machine_mode mode, int unsignedp, rtx label)
3845 rtx op0 = x, op1 = y;
3847 /* Swap operands and condition to ensure canonical RTL. */
3848 if (swap_commutative_operands_p (x, y))
3850 /* If we're not emitting a branch, this means some caller
3851 is out of sync. */
3852 gcc_assert (label);
3854 op0 = y, op1 = x;
3855 comparison = swap_condition (comparison);
3858 #ifdef HAVE_cc0
3859 /* If OP0 is still a constant, then both X and Y must be constants.
3860 Force X into a register to create canonical RTL. */
3861 if (CONSTANT_P (op0))
3862 op0 = force_reg (mode, op0);
3863 #endif
3865 if (unsignedp)
3866 comparison = unsigned_condition (comparison);
3868 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3869 ccp_jump);
3870 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3873 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3875 void
3876 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3877 enum machine_mode mode, int unsignedp)
3879 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
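/* A minimal usage sketch of the two entry points above (X, Y and DONE are
   hypothetical SImode rtxen and a code_label owned by the caller):

     emit_cmp_and_jump_insns (x, y, EQ, NULL_RTX, SImode, 0, done);

   emits "if (x == y) goto done" using whichever cbranch, cmp/bcc or libcall
   strategy prepare_cmp_insn and emit_cmp_and_jump_insn_1 settle on.  */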
3882 /* Emit a library call comparison between floating point X and Y.
3883 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3885 static void
3886 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3887 enum machine_mode *pmode, int *punsignedp)
3889 enum rtx_code comparison = *pcomparison;
3890 enum rtx_code swapped = swap_condition (comparison);
3891 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3892 rtx x = *px;
3893 rtx y = *py;
3894 enum machine_mode orig_mode = GET_MODE (x);
3895 enum machine_mode mode;
3896 rtx value, target, insns, equiv;
3897 rtx libfunc = 0;
3898 bool reversed_p = false;
3900 for (mode = orig_mode;
3901 mode != VOIDmode;
3902 mode = GET_MODE_WIDER_MODE (mode))
3904 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3905 break;
3907 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3909 rtx tmp;
3910 tmp = x; x = y; y = tmp;
3911 comparison = swapped;
3912 break;
3915 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3916 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3918 comparison = reversed;
3919 reversed_p = true;
3920 break;
3924 gcc_assert (mode != VOIDmode);
3926 if (mode != orig_mode)
3928 x = convert_to_mode (mode, x, 0);
3929 y = convert_to_mode (mode, y, 0);
3932 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3933 the RTL. This allows the RTL optimizers to delete the libcall if the
3934 condition can be determined at compile-time. */
3935 if (comparison == UNORDERED)
3937 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3938 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3939 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3940 temp, const_true_rtx, equiv);
3942 else
3944 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
3945 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3947 rtx true_rtx, false_rtx;
3949 switch (comparison)
3951 case EQ:
3952 true_rtx = const0_rtx;
3953 false_rtx = const_true_rtx;
3954 break;
3956 case NE:
3957 true_rtx = const_true_rtx;
3958 false_rtx = const0_rtx;
3959 break;
3961 case GT:
3962 true_rtx = const1_rtx;
3963 false_rtx = const0_rtx;
3964 break;
3966 case GE:
3967 true_rtx = const0_rtx;
3968 false_rtx = constm1_rtx;
3969 break;
3971 case LT:
3972 true_rtx = constm1_rtx;
3973 false_rtx = const0_rtx;
3974 break;
3976 case LE:
3977 true_rtx = const0_rtx;
3978 false_rtx = const1_rtx;
3979 break;
3981 default:
3982 gcc_unreachable ();
3984 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3985 equiv, true_rtx, false_rtx);
3989 start_sequence ();
3990 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3991 word_mode, 2, x, mode, y, mode);
3992 insns = get_insns ();
3993 end_sequence ();
3995 target = gen_reg_rtx (word_mode);
3996 emit_libcall_block (insns, target, value, equiv);
3998 if (comparison == UNORDERED
3999 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4000 comparison = reversed_p ? EQ : NE;
4002 *px = target;
4003 *py = const0_rtx;
4004 *pmode = word_mode;
4005 *pcomparison = comparison;
4006 *punsignedp = 0;
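/* An illustrative sketch of the rewrite performed above, assuming the
   default libgcc convention in which __eqdf2 returns zero iff its operands
   are equal and FLOAT_LIB_COMPARE_RETURNS_BOOL is false: a DFmode test
   "a == b" becomes, roughly,

     t = __eqdf2 (a, b);      libcall result in word_mode
     if (t == 0) ...          *px = t, *py = const0_rtx, EQ unchanged

   with the REG_EQUAL note recording an IF_THEN_ELSE built from the original
   relation, so the libcall can be deleted if the condition turns out to be
   known at compile time.  */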
4009 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4011 void
4012 emit_indirect_jump (rtx loc)
4014 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4015 (loc, Pmode))
4016 loc = copy_to_mode_reg (Pmode, loc);
4018 emit_jump_insn (gen_indirect_jump (loc));
4019 emit_barrier ();
4022 #ifdef HAVE_conditional_move
4024 /* Emit a conditional move instruction if the machine supports one for that
4025 condition and machine mode.
4027 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4028 the mode to use should they be constants. If it is VOIDmode, they cannot
4029 both be constants.
4031 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4032 should be stored there. MODE is the mode to use should they be constants.
4033 If it is VOIDmode, they cannot both be constants.
4035 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4036 is not supported. */
4039 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4040 enum machine_mode cmode, rtx op2, rtx op3,
4041 enum machine_mode mode, int unsignedp)
4043 rtx tem, subtarget, comparison, insn;
4044 enum insn_code icode;
4045 enum rtx_code reversed;
4047 /* If one operand is constant, make it the second one. Only do this
4048 if the other operand is not constant as well. */
4050 if (swap_commutative_operands_p (op0, op1))
4052 tem = op0;
4053 op0 = op1;
4054 op1 = tem;
4055 code = swap_condition (code);
4058 /* get_condition will prefer to generate LT and GT even if the old
4059 comparison was against zero, so undo that canonicalization here since
4060 comparisons against zero are cheaper. */
4061 if (code == LT && op1 == const1_rtx)
4062 code = LE, op1 = const0_rtx;
4063 else if (code == GT && op1 == constm1_rtx)
4064 code = GE, op1 = const0_rtx;
4066 if (cmode == VOIDmode)
4067 cmode = GET_MODE (op0);
4069 if (swap_commutative_operands_p (op2, op3)
4070 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4071 != UNKNOWN))
4073 tem = op2;
4074 op2 = op3;
4075 op3 = tem;
4076 code = reversed;
4079 if (mode == VOIDmode)
4080 mode = GET_MODE (op2);
4082 icode = movcc_gen_code[mode];
4084 if (icode == CODE_FOR_nothing)
4085 return 0;
4087 if (!target)
4088 target = gen_reg_rtx (mode);
4090 subtarget = target;
4092 /* If the insn doesn't accept these operands, put them in pseudos. */
4094 if (!insn_data[icode].operand[0].predicate
4095 (subtarget, insn_data[icode].operand[0].mode))
4096 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4098 if (!insn_data[icode].operand[2].predicate
4099 (op2, insn_data[icode].operand[2].mode))
4100 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4102 if (!insn_data[icode].operand[3].predicate
4103 (op3, insn_data[icode].operand[3].mode))
4104 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4106 /* Everything should now be in suitable form, so emit the compare insn
4107 and then the conditional move. */
4109 comparison
4110 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4112 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4113 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4114 return NULL and let the caller figure out how best to deal with this
4115 situation. */
4116 if (GET_CODE (comparison) != code)
4117 return NULL_RTX;
4119 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4121 /* If that failed, then give up. */
4122 if (insn == 0)
4123 return 0;
4125 emit_insn (insn);
4127 if (subtarget != target)
4128 convert_move (target, subtarget, 0);
4130 return target;
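/* A minimal sketch of a caller using this interface to compute an integer
   maximum (A, B and the pseudo DEST are hypothetical SImode rtxen):

     rtx t = emit_conditional_move (dest, GT, a, b, SImode,
                                    a, b, SImode, 0);
     if (t == NULL_RTX)
       ... fall back to an explicit compare-and-branch sequence ...

   i.e. dest = (a > b) ? a : b when the target provides a movsicc pattern,
   and a NULL result otherwise.  */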
4133 /* Return nonzero if a conditional move of mode MODE is supported.
4135 This function is for combine so it can tell whether an insn that looks
4136 like a conditional move is actually supported by the hardware. If we
4137 guess wrong we lose a bit on optimization, but that's it. */
4138 /* ??? sparc64 supports conditionally moving integer values based on fp
4139 comparisons, and vice versa. How do we handle them? */
4142 can_conditionally_move_p (enum machine_mode mode)
4144 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4145 return 1;
4147 return 0;
4150 #endif /* HAVE_conditional_move */
4152 /* Emit a conditional addition instruction if the machine supports one for that
4153 condition and machine mode.
4155 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4156 the mode to use should they be constants. If it is VOIDmode, they cannot
4157 both be constants.
4159 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4160 should be stored there. MODE is the mode to use should they be constants.
4161 If it is VOIDmode, they cannot both be constants.
4163 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4164 is not supported. */
4167 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4168 enum machine_mode cmode, rtx op2, rtx op3,
4169 enum machine_mode mode, int unsignedp)
4171 rtx tem, subtarget, comparison, insn;
4172 enum insn_code icode;
4173 enum rtx_code reversed;
4175 /* If one operand is constant, make it the second one. Only do this
4176 if the other operand is not constant as well. */
4178 if (swap_commutative_operands_p (op0, op1))
4180 tem = op0;
4181 op0 = op1;
4182 op1 = tem;
4183 code = swap_condition (code);
4186 /* get_condition will prefer to generate LT and GT even if the old
4187 comparison was against zero, so undo that canonicalization here since
4188 comparisons against zero are cheaper. */
4189 if (code == LT && op1 == const1_rtx)
4190 code = LE, op1 = const0_rtx;
4191 else if (code == GT && op1 == constm1_rtx)
4192 code = GE, op1 = const0_rtx;
4194 if (cmode == VOIDmode)
4195 cmode = GET_MODE (op0);
4197 if (swap_commutative_operands_p (op2, op3)
4198 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4199 != UNKNOWN))
4201 tem = op2;
4202 op2 = op3;
4203 op3 = tem;
4204 code = reversed;
4207 if (mode == VOIDmode)
4208 mode = GET_MODE (op2);
4210 icode = addcc_optab->handlers[(int) mode].insn_code;
4212 if (icode == CODE_FOR_nothing)
4213 return 0;
4215 if (!target)
4216 target = gen_reg_rtx (mode);
4218 /* If the insn doesn't accept these operands, put them in pseudos. */
4220 if (!insn_data[icode].operand[0].predicate
4221 (target, insn_data[icode].operand[0].mode))
4222 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4223 else
4224 subtarget = target;
4226 if (!insn_data[icode].operand[2].predicate
4227 (op2, insn_data[icode].operand[2].mode))
4228 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4230 if (!insn_data[icode].operand[3].predicate
4231 (op3, insn_data[icode].operand[3].mode))
4232 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4234 /* Everything should now be in suitable form, so emit the compare insn
4235 and then the conditional add. */
4237 comparison
4238 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4240 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4241 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4242 return NULL and let the caller figure out how best to deal with this
4243 situation. */
4244 if (GET_CODE (comparison) != code)
4245 return NULL_RTX;
4247 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4249 /* If that failed, then give up. */
4250 if (insn == 0)
4251 return 0;
4253 emit_insn (insn);
4255 if (subtarget != target)
4256 convert_move (target, subtarget, 0);
4258 return target;
4261 /* These functions attempt to generate an insn body, rather than
4262 emitting the insn, but if the gen function already emits them, we
4263 make no attempt to turn them back into naked patterns. */
4265 /* Generate and return an insn body to add Y to X. */
4268 gen_add2_insn (rtx x, rtx y)
4270 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4272 gcc_assert (insn_data[icode].operand[0].predicate
4273 (x, insn_data[icode].operand[0].mode));
4274 gcc_assert (insn_data[icode].operand[1].predicate
4275 (x, insn_data[icode].operand[1].mode));
4276 gcc_assert (insn_data[icode].operand[2].predicate
4277 (y, insn_data[icode].operand[2].mode));
4279 return GEN_FCN (icode) (x, x, y);
4282 /* Generate and return an insn body to add r1 and c,
4283 storing the result in r0. */
4285 gen_add3_insn (rtx r0, rtx r1, rtx c)
4287 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4289 if (icode == CODE_FOR_nothing
4290 || !(insn_data[icode].operand[0].predicate
4291 (r0, insn_data[icode].operand[0].mode))
4292 || !(insn_data[icode].operand[1].predicate
4293 (r1, insn_data[icode].operand[1].mode))
4294 || !(insn_data[icode].operand[2].predicate
4295 (c, insn_data[icode].operand[2].mode)))
4296 return NULL_RTX;
4298 return GEN_FCN (icode) (r0, r1, c);
4302 have_add2_insn (rtx x, rtx y)
4304 int icode;
4306 gcc_assert (GET_MODE (x) != VOIDmode);
4308 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4310 if (icode == CODE_FOR_nothing)
4311 return 0;
4313 if (!(insn_data[icode].operand[0].predicate
4314 (x, insn_data[icode].operand[0].mode))
4315 || !(insn_data[icode].operand[1].predicate
4316 (x, insn_data[icode].operand[1].mode))
4317 || !(insn_data[icode].operand[2].predicate
4318 (y, insn_data[icode].operand[2].mode)))
4319 return 0;
4321 return 1;
4324 /* Generate and return an insn body to subtract Y from X. */
4327 gen_sub2_insn (rtx x, rtx y)
4329 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4331 gcc_assert (insn_data[icode].operand[0].predicate
4332 (x, insn_data[icode].operand[0].mode));
4333 gcc_assert (insn_data[icode].operand[1].predicate
4334 (x, insn_data[icode].operand[1].mode));
4335 gcc_assert (insn_data[icode].operand[2].predicate
4336 (y, insn_data[icode].operand[2].mode));
4338 return GEN_FCN (icode) (x, x, y);
4341 /* Generate and return an insn body to subtract c from r1,
4342 storing the result in r0. */
4344 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4346 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4348 if (icode == CODE_FOR_nothing
4349 || !(insn_data[icode].operand[0].predicate
4350 (r0, insn_data[icode].operand[0].mode))
4351 || !(insn_data[icode].operand[1].predicate
4352 (r1, insn_data[icode].operand[1].mode))
4353 || !(insn_data[icode].operand[2].predicate
4354 (c, insn_data[icode].operand[2].mode)))
4355 return NULL_RTX;
4357 return GEN_FCN (icode) (r0, r1, c);
4361 have_sub2_insn (rtx x, rtx y)
4363 int icode;
4365 gcc_assert (GET_MODE (x) != VOIDmode);
4367 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4369 if (icode == CODE_FOR_nothing)
4370 return 0;
4372 if (!(insn_data[icode].operand[0].predicate
4373 (x, insn_data[icode].operand[0].mode))
4374 || !(insn_data[icode].operand[1].predicate
4375 (x, insn_data[icode].operand[1].mode))
4376 || !(insn_data[icode].operand[2].predicate
4377 (y, insn_data[icode].operand[2].mode)))
4378 return 0;
4380 return 1;
4383 /* Generate the body of an instruction to copy Y into X.
4384 It may be a list of insns, if one insn isn't enough. */
4387 gen_move_insn (rtx x, rtx y)
4389 rtx seq;
4391 start_sequence ();
4392 emit_move_insn_1 (x, y);
4393 seq = get_insns ();
4394 end_sequence ();
4395 return seq;
4398 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4399 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4400 no such operation exists, CODE_FOR_nothing will be returned. */
4402 enum insn_code
4403 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4404 int unsignedp)
4406 convert_optab tab;
4407 #ifdef HAVE_ptr_extend
4408 if (unsignedp < 0)
4409 return CODE_FOR_ptr_extend;
4410 #endif
4412 tab = unsignedp ? zext_optab : sext_optab;
4413 return tab->handlers[to_mode][from_mode].insn_code;
4416 /* Generate the body of an insn to extend Y (with mode MFROM)
4417 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4420 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4421 enum machine_mode mfrom, int unsignedp)
4423 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4424 return GEN_FCN (icode) (x, y);
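/* For illustration (with hypothetical pseudo registers), zero-extending an
   SImode value into a DImode register might be done as

     if (can_extend_p (DImode, SImode, 1) != CODE_FOR_nothing)
       emit_insn (gen_extend_insn (di_reg, si_reg, DImode, SImode, 1));

   gen_extend_insn does not itself check for CODE_FOR_nothing, so callers
   are expected to verify that the extension is supported first.  */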
4427 /* can_fix_p and can_float_p say whether the target machine
4428 can directly convert a given fixed point type to
4429 a given floating point type, or vice versa.
4430 The returned value is the CODE_FOR_... value to use,
4431 or CODE_FOR_nothing if these modes cannot be directly converted.
4433 *TRUNCP_PTR is set to 1 if it is necessary to output
4434 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4436 static enum insn_code
4437 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4438 int unsignedp, int *truncp_ptr)
4440 convert_optab tab;
4441 enum insn_code icode;
4443 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4444 icode = tab->handlers[fixmode][fltmode].insn_code;
4445 if (icode != CODE_FOR_nothing)
4447 *truncp_ptr = 0;
4448 return icode;
4451 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4452 for this to work. We need to rework the fix* and ftrunc* patterns
4453 and documentation. */
4454 tab = unsignedp ? ufix_optab : sfix_optab;
4455 icode = tab->handlers[fixmode][fltmode].insn_code;
4456 if (icode != CODE_FOR_nothing
4457 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4459 *truncp_ptr = 1;
4460 return icode;
4463 *truncp_ptr = 0;
4464 return CODE_FOR_nothing;
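/* For example, for a signed DFmode -> SImode conversion a port providing
   the standard fix_truncdfsi2 pattern is matched by the first
   (sfixtrunc_optab) lookup above, whereas a port providing only fixdfsi2
   together with ftruncdf2 is matched by the second lookup, with *TRUNCP_PTR
   set so that expand_fix emits the explicit FTRUNC beforehand.  */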
4467 static enum insn_code
4468 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4469 int unsignedp)
4471 convert_optab tab;
4473 tab = unsignedp ? ufloat_optab : sfloat_optab;
4474 return tab->handlers[fltmode][fixmode].insn_code;
4477 /* Generate code to convert FROM to floating point
4478 and store in TO. FROM must be fixed point and not VOIDmode.
4479 UNSIGNEDP nonzero means regard FROM as unsigned.
4480 Normally this is done by correcting the final value
4481 if it is negative. */
4483 void
4484 expand_float (rtx to, rtx from, int unsignedp)
4486 enum insn_code icode;
4487 rtx target = to;
4488 enum machine_mode fmode, imode;
4489 bool can_do_signed = false;
4491 /* Crash now, because we won't be able to decide which mode to use. */
4492 gcc_assert (GET_MODE (from) != VOIDmode);
4494 /* Look for an insn to do the conversion. Do it in the specified
4495 modes if possible; otherwise convert either input, output or both to
4496 wider mode. If the integer mode is wider than the mode of FROM,
4497 we can do the conversion signed even if the input is unsigned. */
4499 for (fmode = GET_MODE (to); fmode != VOIDmode;
4500 fmode = GET_MODE_WIDER_MODE (fmode))
4501 for (imode = GET_MODE (from); imode != VOIDmode;
4502 imode = GET_MODE_WIDER_MODE (imode))
4504 int doing_unsigned = unsignedp;
4506 if (fmode != GET_MODE (to)
4507 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4508 continue;
4510 icode = can_float_p (fmode, imode, unsignedp);
4511 if (icode == CODE_FOR_nothing && unsignedp)
4513 enum insn_code scode = can_float_p (fmode, imode, 0);
4514 if (scode != CODE_FOR_nothing)
4515 can_do_signed = true;
4516 if (imode != GET_MODE (from))
4517 icode = scode, doing_unsigned = 0;
4520 if (icode != CODE_FOR_nothing)
4522 if (imode != GET_MODE (from))
4523 from = convert_to_mode (imode, from, unsignedp);
4525 if (fmode != GET_MODE (to))
4526 target = gen_reg_rtx (fmode);
4528 emit_unop_insn (icode, target, from,
4529 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4531 if (target != to)
4532 convert_move (to, target, 0);
4533 return;
4537 /* Unsigned integer, and no way to convert directly. For binary
4538 floating point modes, convert as signed, then conditionally adjust
4539 the result. */
4540 if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
4542 rtx label = gen_label_rtx ();
4543 rtx temp;
4544 REAL_VALUE_TYPE offset;
4546 /* Look for a usable floating mode FMODE wider than the source and at
4547 least as wide as the target. Using FMODE will avoid rounding woes
4548 with unsigned values greater than the signed maximum value. */
4550 for (fmode = GET_MODE (to); fmode != VOIDmode;
4551 fmode = GET_MODE_WIDER_MODE (fmode))
4552 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4553 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4554 break;
4556 if (fmode == VOIDmode)
4558 /* There is no such mode. Pretend the target is wide enough. */
4559 fmode = GET_MODE (to);
4561 /* Avoid double-rounding when TO is narrower than FROM. */
4562 if ((significand_size (fmode) + 1)
4563 < GET_MODE_BITSIZE (GET_MODE (from)))
4565 rtx temp1;
4566 rtx neglabel = gen_label_rtx ();
4568 /* Don't use TARGET if it isn't a register, is a hard register,
4569 or is the wrong mode. */
4570 if (!REG_P (target)
4571 || REGNO (target) < FIRST_PSEUDO_REGISTER
4572 || GET_MODE (target) != fmode)
4573 target = gen_reg_rtx (fmode);
4575 imode = GET_MODE (from);
4576 do_pending_stack_adjust ();
4578 /* Test whether the sign bit is set. */
4579 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4580 0, neglabel);
4582 /* The sign bit is not set. Convert as signed. */
4583 expand_float (target, from, 0);
4584 emit_jump_insn (gen_jump (label));
4585 emit_barrier ();
4587 /* The sign bit is set.
4588 Convert to a usable (positive signed) value by shifting right
4589 one bit, while remembering if a nonzero bit was shifted
4590 out; i.e., compute (from & 1) | (from >> 1). */
4592 emit_label (neglabel);
4593 temp = expand_binop (imode, and_optab, from, const1_rtx,
4594 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4595 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4596 NULL_RTX, 1);
4597 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4598 OPTAB_LIB_WIDEN);
4599 expand_float (target, temp, 0);
4601 /* Multiply by 2 to undo the shift above. */
4602 temp = expand_binop (fmode, add_optab, target, target,
4603 target, 0, OPTAB_LIB_WIDEN);
4604 if (temp != target)
4605 emit_move_insn (target, temp);
4607 do_pending_stack_adjust ();
4608 emit_label (label);
4609 goto done;
4613 /* If we are about to do some arithmetic to correct for an
4614 unsigned operand, do it in a pseudo-register. */
4616 if (GET_MODE (to) != fmode
4617 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4618 target = gen_reg_rtx (fmode);
4620 /* Convert as signed integer to floating. */
4621 expand_float (target, from, 0);
4623 /* If FROM is negative (and therefore TO is negative),
4624 correct its value by 2**bitwidth. */
4626 do_pending_stack_adjust ();
4627 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4628 0, label);
4631 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4632 temp = expand_binop (fmode, add_optab, target,
4633 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4634 target, 0, OPTAB_LIB_WIDEN);
4635 if (temp != target)
4636 emit_move_insn (target, temp);
4638 do_pending_stack_adjust ();
4639 emit_label (label);
4640 goto done;
4643 /* No hardware instruction available; call a library routine. */
4645 rtx libfunc;
4646 rtx insns;
4647 rtx value;
4648 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4650 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4651 from = convert_to_mode (SImode, from, unsignedp);
4653 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4654 gcc_assert (libfunc);
4656 start_sequence ();
4658 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4659 GET_MODE (to), 1, from,
4660 GET_MODE (from));
4661 insns = get_insns ();
4662 end_sequence ();
4664 emit_libcall_block (insns, target, value,
4665 gen_rtx_FLOAT (GET_MODE (to), from));
4668 done:
4670 /* Copy result to requested destination
4671 if we have been computing in a temp location. */
4673 if (target != to)
4675 if (GET_MODE (target) == GET_MODE (to))
4676 emit_move_insn (to, target);
4677 else
4678 convert_move (to, target, 0);
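/* A worked instance of the signed-convert-then-correct path above, assuming
   a 32-bit FROM converted to DFmode: the unsigned input 0x80000000
   (2147483648) first converts as signed to -2147483648.0; since FROM
   compares below zero, the 2**32 offset 4294967296.0 is added back, giving
   the intended 2147483648.0.  */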
4682 /* Generate code to convert FROM to fixed point and store in TO. FROM
4683 must be floating point. */
4685 void
4686 expand_fix (rtx to, rtx from, int unsignedp)
4688 enum insn_code icode;
4689 rtx target = to;
4690 enum machine_mode fmode, imode;
4691 int must_trunc = 0;
4693 /* We first try to find a pair of modes, one real and one integer, at
4694 least as wide as FROM and TO, respectively, in which we can open-code
4695 this conversion. If the integer mode is wider than the mode of TO,
4696 we can do the conversion either signed or unsigned. */
4698 for (fmode = GET_MODE (from); fmode != VOIDmode;
4699 fmode = GET_MODE_WIDER_MODE (fmode))
4700 for (imode = GET_MODE (to); imode != VOIDmode;
4701 imode = GET_MODE_WIDER_MODE (imode))
4703 int doing_unsigned = unsignedp;
4705 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4706 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4707 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4709 if (icode != CODE_FOR_nothing)
4711 if (fmode != GET_MODE (from))
4712 from = convert_to_mode (fmode, from, 0);
4714 if (must_trunc)
4716 rtx temp = gen_reg_rtx (GET_MODE (from));
4717 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4718 temp, 0);
4721 if (imode != GET_MODE (to))
4722 target = gen_reg_rtx (imode);
4724 emit_unop_insn (icode, target, from,
4725 doing_unsigned ? UNSIGNED_FIX : FIX);
4726 if (target != to)
4727 convert_move (to, target, unsignedp);
4728 return;
4732 /* For an unsigned conversion, there is one more way to do it.
4733 If we have a signed conversion, we generate code that compares
4734 the real value to the largest representable positive number. If it
4735 is smaller, the conversion is done normally. Otherwise, subtract
4736 one plus the highest signed number, convert, and add it back.
4738 We only need to check all real modes, since we know we didn't find
4739 anything with a wider integer mode.
4741 This code used to extend the FP value into a mode wider than the destination.
4742 This is not needed. Consider, for instance, conversion from SFmode
4743 into DImode.
4745 The hot path through the code deals with inputs smaller than 2^63
4746 and does just the conversion, so there are no bits to lose.
4748 In the other path we know the value is positive and in the range 2^63..2^64-1
4749 inclusive (for any other input, overflow happens and the result is undefined).
4750 So we know that the most significant bit set in the mantissa corresponds to
4751 2^63. The subtraction of 2^63 should not generate any rounding as it
4752 simply clears out that bit. The rest is trivial. */
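/* A concrete instance of the above, for DFmode to unsigned DImode with the
   (exactly representable) input 2**63 + 2048: the value compares >= 2**63,
   so 2**63 is subtracted exactly, merely clearing the top bit; the signed
   fix of the remaining 2048.0 yields 2048; and the final XOR with 1 << 63
   restores the top bit, producing 0x8000000000000800.  */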
4754 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4755 for (fmode = GET_MODE (from); fmode != VOIDmode;
4756 fmode = GET_MODE_WIDER_MODE (fmode))
4757 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4758 &must_trunc))
4760 int bitsize;
4761 REAL_VALUE_TYPE offset;
4762 rtx limit, lab1, lab2, insn;
4764 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4765 real_2expN (&offset, bitsize - 1);
4766 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4767 lab1 = gen_label_rtx ();
4768 lab2 = gen_label_rtx ();
4770 if (fmode != GET_MODE (from))
4771 from = convert_to_mode (fmode, from, 0);
4773 /* See if we need to do the subtraction. */
4774 do_pending_stack_adjust ();
4775 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4776 0, lab1);
4778 /* If not, do the signed "fix" and branch around fixup code. */
4779 expand_fix (to, from, 0);
4780 emit_jump_insn (gen_jump (lab2));
4781 emit_barrier ();
4783 /* Otherwise, subtract 2**(N-1), convert to signed number,
4784 then add 2**(N-1). Do the addition using XOR since this
4785 will often generate better code. */
4786 emit_label (lab1);
4787 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4788 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4789 expand_fix (to, target, 0);
4790 target = expand_binop (GET_MODE (to), xor_optab, to,
4791 gen_int_mode
4792 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4793 GET_MODE (to)),
4794 to, 1, OPTAB_LIB_WIDEN);
4796 if (target != to)
4797 emit_move_insn (to, target);
4799 emit_label (lab2);
4801 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4802 != CODE_FOR_nothing)
4804 /* Make a place for a REG_NOTE and add it. */
4805 insn = emit_move_insn (to, to);
4806 set_unique_reg_note (insn,
4807 REG_EQUAL,
4808 gen_rtx_fmt_e (UNSIGNED_FIX,
4809 GET_MODE (to),
4810 copy_rtx (from)));
4813 return;
4816 /* We can't do it with an insn, so use a library call. But first ensure
4817 that the mode of TO is at least as wide as SImode, since those are the
4818 only library calls we know about. */
4820 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4822 target = gen_reg_rtx (SImode);
4824 expand_fix (target, from, unsignedp);
4826 else
4828 rtx insns;
4829 rtx value;
4830 rtx libfunc;
4832 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4833 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4834 gcc_assert (libfunc);
4836 start_sequence ();
4838 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4839 GET_MODE (to), 1, from,
4840 GET_MODE (from));
4841 insns = get_insns ();
4842 end_sequence ();
4844 emit_libcall_block (insns, target, value,
4845 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4846 GET_MODE (to), from));
4849 if (target != to)
4851 if (GET_MODE (to) == GET_MODE (target))
4852 emit_move_insn (to, target);
4853 else
4854 convert_move (to, target, 0);
4858 /* Report whether we have an instruction to perform the operation
4859 specified by CODE on operands of mode MODE. */
4861 have_insn_for (enum rtx_code code, enum machine_mode mode)
4863 return (code_to_optab[(int) code] != 0
4864 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4865 != CODE_FOR_nothing));
4868 /* Create a blank optab. */
4869 static optab
4870 new_optab (void)
4872 int i;
4873 optab op = ggc_alloc (sizeof (struct optab));
4874 for (i = 0; i < NUM_MACHINE_MODES; i++)
4876 op->handlers[i].insn_code = CODE_FOR_nothing;
4877 op->handlers[i].libfunc = 0;
4880 return op;
4883 static convert_optab
4884 new_convert_optab (void)
4886 int i, j;
4887 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4888 for (i = 0; i < NUM_MACHINE_MODES; i++)
4889 for (j = 0; j < NUM_MACHINE_MODES; j++)
4891 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4892 op->handlers[i][j].libfunc = 0;
4894 return op;
4897 /* Same, but fill in its code as CODE, and write it into the
4898 code_to_optab table. */
4899 static inline optab
4900 init_optab (enum rtx_code code)
4902 optab op = new_optab ();
4903 op->code = code;
4904 code_to_optab[(int) code] = op;
4905 return op;
4908 /* Same, but fill in its code as CODE, and do _not_ write it into
4909 the code_to_optab table. */
4910 static inline optab
4911 init_optabv (enum rtx_code code)
4913 optab op = new_optab ();
4914 op->code = code;
4915 return op;
4918 /* Conversion optabs never go in the code_to_optab table. */
4919 static inline convert_optab
4920 init_convert_optab (enum rtx_code code)
4922 convert_optab op = new_convert_optab ();
4923 op->code = code;
4924 return op;
4927 /* Initialize the libfunc fields of an entire group of entries in some
4928 optab. Each entry is set equal to a string consisting of a leading
4929 pair of underscores followed by a generic operation name followed by
4930 a mode name (downshifted to lowercase) followed by a single character
4931 representing the number of operands for the given operation (which is
4932 usually one of the characters '2', '3', or '4').
4934 OPTABLE is the table in which libfunc fields are to be initialized.
4935 FIRST_MODE is the first machine mode index in the given optab to
4936 initialize.
4937 LAST_MODE is the last machine mode index in the given optab to
4938 initialize.
4939 OPNAME is the generic (string) name of the operation.
4940 SUFFIX is the character which specifies the number of operands for
4941 the given generic operation.
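/* For example, on a 32-bit target the call init_integral_libfuncs
   (add_optab, "add", '3') below generates the names "__addsi3" and
   "__adddi3" under this scheme, and init_integral_libfuncs (smul_optab,
   "mul", '3') generates "__mulsi3" and "__muldi3", the latter being the
   libgcc routine used when no DImode multiply insn exists.  */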
4944 static void
4945 init_libfuncs (optab optable, int first_mode, int last_mode,
4946 const char *opname, int suffix)
4948 int mode;
4949 unsigned opname_len = strlen (opname);
4951 for (mode = first_mode; (int) mode <= (int) last_mode;
4952 mode = (enum machine_mode) ((int) mode + 1))
4954 const char *mname = GET_MODE_NAME (mode);
4955 unsigned mname_len = strlen (mname);
4956 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
4957 char *p;
4958 const char *q;
4960 p = libfunc_name;
4961 *p++ = '_';
4962 *p++ = '_';
4963 for (q = opname; *q; )
4964 *p++ = *q++;
4965 for (q = mname; *q; q++)
4966 *p++ = TOLOWER (*q);
4967 *p++ = suffix;
4968 *p = '\0';
4970 optable->handlers[(int) mode].libfunc
4971 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
4975 /* Initialize the libfunc fields of an entire group of entries in some
4976 optab which correspond to all integer mode operations. The parameters
4977 have the same meaning as similarly named ones for the `init_libfuncs'
4978 routine. (See above). */
4980 static void
4981 init_integral_libfuncs (optab optable, const char *opname, int suffix)
4983 int maxsize = 2*BITS_PER_WORD;
4984 if (maxsize < LONG_LONG_TYPE_SIZE)
4985 maxsize = LONG_LONG_TYPE_SIZE;
4986 init_libfuncs (optable, word_mode,
4987 mode_for_size (maxsize, MODE_INT, 0),
4988 opname, suffix);
4991 /* Initialize the libfunc fields of an entire group of entries in some
4992 optab which correspond to all real mode operations. The parameters
4993 have the same meaning as similarly named ones for the `init_libfuncs'
4994 routine. (See above). */
4996 static void
4997 init_floating_libfuncs (optab optable, const char *opname, int suffix)
4999 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
5000 init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
5001 opname, suffix);
5004 /* Initialize the libfunc fields of an entire group of entries of an
5005 inter-mode-class conversion optab. The string formation rules are
5006 similar to the ones for init_libfuncs, above, but instead of having
5007 a mode name and an operand count these functions have two mode names
5008 and no operand count. */
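/* For example, init_interclass_conv_libfuncs (sfloat_optab, "float",
   MODE_INT, MODE_FLOAT) below produces names such as "__floatsidf"
   (SImode -> DFmode) and "__floatdisf" (DImode -> SFmode), while the
   sfix_optab/"fix" call produces "__fixdfsi", "__fixsfdi" and so on; the
   from-mode name is written before the to-mode name and there is no
   trailing operand count.  */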
5009 static void
5010 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
5011 enum mode_class from_class,
5012 enum mode_class to_class)
5014 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
5015 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
5016 size_t opname_len = strlen (opname);
5017 size_t max_mname_len = 0;
5019 enum machine_mode fmode, tmode;
5020 const char *fname, *tname;
5021 const char *q;
5022 char *libfunc_name, *suffix;
5023 char *p;
5025 for (fmode = first_from_mode;
5026 fmode != VOIDmode;
5027 fmode = GET_MODE_WIDER_MODE (fmode))
5028 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
5030 for (tmode = first_to_mode;
5031 tmode != VOIDmode;
5032 tmode = GET_MODE_WIDER_MODE (tmode))
5033 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
5035 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5036 libfunc_name[0] = '_';
5037 libfunc_name[1] = '_';
5038 memcpy (&libfunc_name[2], opname, opname_len);
5039 suffix = libfunc_name + opname_len + 2;
5041 for (fmode = first_from_mode; fmode != VOIDmode;
5042 fmode = GET_MODE_WIDER_MODE (fmode))
5043 for (tmode = first_to_mode; tmode != VOIDmode;
5044 tmode = GET_MODE_WIDER_MODE (tmode))
5046 fname = GET_MODE_NAME (fmode);
5047 tname = GET_MODE_NAME (tmode);
5049 p = suffix;
5050 for (q = fname; *q; p++, q++)
5051 *p = TOLOWER (*q);
5052 for (q = tname; *q; p++, q++)
5053 *p = TOLOWER (*q);
5055 *p = '\0';
5057 tab->handlers[tmode][fmode].libfunc
5058 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5059 p - libfunc_name));
5063 /* Initialize the libfunc fields of an entire group of entries of an
5064 intra-mode-class conversion optab. The string formation rules are
5065 similar to the ones for init_libfunc, above. WIDENING says whether
5066 the optab goes from narrow to wide modes or vice versa. These functions
5067 have two mode names _and_ an operand count. */
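/* For example, init_intraclass_conv_libfuncs (sext_optab, "extend",
   MODE_FLOAT, true) below produces "__extendsfdf2" (SFmode -> DFmode),
   while the corresponding trunc_optab call with WIDENING false produces
   "__truncdfsf2"; the narrower mode name comes first for widening
   conversions and second for narrowing ones.  */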
5068 static void
5069 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
5070 enum mode_class class, bool widening)
5072 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
5073 size_t opname_len = strlen (opname);
5074 size_t max_mname_len = 0;
5076 enum machine_mode nmode, wmode;
5077 const char *nname, *wname;
5078 const char *q;
5079 char *libfunc_name, *suffix;
5080 char *p;
5082 for (nmode = first_mode; nmode != VOIDmode;
5083 nmode = GET_MODE_WIDER_MODE (nmode))
5084 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
5086 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5087 libfunc_name[0] = '_';
5088 libfunc_name[1] = '_';
5089 memcpy (&libfunc_name[2], opname, opname_len);
5090 suffix = libfunc_name + opname_len + 2;
5092 for (nmode = first_mode; nmode != VOIDmode;
5093 nmode = GET_MODE_WIDER_MODE (nmode))
5094 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
5095 wmode = GET_MODE_WIDER_MODE (wmode))
5097 nname = GET_MODE_NAME (nmode);
5098 wname = GET_MODE_NAME (wmode);
5100 p = suffix;
5101 for (q = widening ? nname : wname; *q; p++, q++)
5102 *p = TOLOWER (*q);
5103 for (q = widening ? wname : nname; *q; p++, q++)
5104 *p = TOLOWER (*q);
5106 *p++ = '2';
5107 *p = '\0';
5109 tab->handlers[widening ? wmode : nmode]
5110 [widening ? nmode : wmode].libfunc
5111 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5112 p - libfunc_name));
5118 init_one_libfunc (const char *name)
5120 rtx symbol;
5122 /* Create a FUNCTION_DECL that can be passed to
5123 targetm.encode_section_info. */
5124 /* ??? We don't have any type information except that this is
5125 a function. Pretend this is "int foo()". */
5126 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5127 build_function_type (integer_type_node, NULL_TREE));
5128 DECL_ARTIFICIAL (decl) = 1;
5129 DECL_EXTERNAL (decl) = 1;
5130 TREE_PUBLIC (decl) = 1;
5132 symbol = XEXP (DECL_RTL (decl), 0);
5134 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5135 are the flags assigned by targetm.encode_section_info. */
5136 SET_SYMBOL_REF_DECL (symbol, 0);
5138 return symbol;
5141 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5142 MODE to NAME, which should be either 0 or a string constant. */
5143 void
5144 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5146 if (name)
5147 optable->handlers[mode].libfunc = init_one_libfunc (name);
5148 else
5149 optable->handlers[mode].libfunc = 0;
5152 /* Call this to reset the function entry for one conversion optab
5153 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5154 either 0 or a string constant. */
5155 void
5156 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5157 enum machine_mode fmode, const char *name)
5159 if (name)
5160 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5161 else
5162 optable->handlers[tmode][fmode].libfunc = 0;
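/* A minimal sketch of how a target's targetm.init_libfuncs hook (invoked at
   the end of init_optabs below) typically uses these two entry points; the
   routine names here are purely hypothetical:

     set_optab_libfunc (sdiv_optab, SImode, "__vendor_divsi3");
     set_conv_libfunc (sfloat_optab, DFmode, SImode, "__vendor_floatsidf");

   Passing 0 instead of a name removes the libcall for that entry.  */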
5165 /* Call this once to initialize the contents of the optabs
5166 appropriately for the current target machine. */
5168 void
5169 init_optabs (void)
5171 unsigned int i;
5173 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5175 for (i = 0; i < NUM_RTX_CODE; i++)
5176 setcc_gen_code[i] = CODE_FOR_nothing;
5178 #ifdef HAVE_conditional_move
5179 for (i = 0; i < NUM_MACHINE_MODES; i++)
5180 movcc_gen_code[i] = CODE_FOR_nothing;
5181 #endif
5183 for (i = 0; i < NUM_MACHINE_MODES; i++)
5185 vcond_gen_code[i] = CODE_FOR_nothing;
5186 vcondu_gen_code[i] = CODE_FOR_nothing;
5189 add_optab = init_optab (PLUS);
5190 addv_optab = init_optabv (PLUS);
5191 sub_optab = init_optab (MINUS);
5192 subv_optab = init_optabv (MINUS);
5193 smul_optab = init_optab (MULT);
5194 smulv_optab = init_optabv (MULT);
5195 smul_highpart_optab = init_optab (UNKNOWN);
5196 umul_highpart_optab = init_optab (UNKNOWN);
5197 smul_widen_optab = init_optab (UNKNOWN);
5198 umul_widen_optab = init_optab (UNKNOWN);
5199 usmul_widen_optab = init_optab (UNKNOWN);
5200 sdiv_optab = init_optab (DIV);
5201 sdivv_optab = init_optabv (DIV);
5202 sdivmod_optab = init_optab (UNKNOWN);
5203 udiv_optab = init_optab (UDIV);
5204 udivmod_optab = init_optab (UNKNOWN);
5205 smod_optab = init_optab (MOD);
5206 umod_optab = init_optab (UMOD);
5207 fmod_optab = init_optab (UNKNOWN);
5208 drem_optab = init_optab (UNKNOWN);
5209 ftrunc_optab = init_optab (UNKNOWN);
5210 and_optab = init_optab (AND);
5211 ior_optab = init_optab (IOR);
5212 xor_optab = init_optab (XOR);
5213 ashl_optab = init_optab (ASHIFT);
5214 ashr_optab = init_optab (ASHIFTRT);
5215 lshr_optab = init_optab (LSHIFTRT);
5216 rotl_optab = init_optab (ROTATE);
5217 rotr_optab = init_optab (ROTATERT);
5218 smin_optab = init_optab (SMIN);
5219 smax_optab = init_optab (SMAX);
5220 umin_optab = init_optab (UMIN);
5221 umax_optab = init_optab (UMAX);
5222 pow_optab = init_optab (UNKNOWN);
5223 atan2_optab = init_optab (UNKNOWN);
5225 /* These three have codes assigned exclusively for the sake of
5226 have_insn_for. */
5227 mov_optab = init_optab (SET);
5228 movstrict_optab = init_optab (STRICT_LOW_PART);
5229 cmp_optab = init_optab (COMPARE);
5231 ucmp_optab = init_optab (UNKNOWN);
5232 tst_optab = init_optab (UNKNOWN);
5234 eq_optab = init_optab (EQ);
5235 ne_optab = init_optab (NE);
5236 gt_optab = init_optab (GT);
5237 ge_optab = init_optab (GE);
5238 lt_optab = init_optab (LT);
5239 le_optab = init_optab (LE);
5240 unord_optab = init_optab (UNORDERED);
5242 neg_optab = init_optab (NEG);
5243 negv_optab = init_optabv (NEG);
5244 abs_optab = init_optab (ABS);
5245 absv_optab = init_optabv (ABS);
5246 addcc_optab = init_optab (UNKNOWN);
5247 one_cmpl_optab = init_optab (NOT);
5248 ffs_optab = init_optab (FFS);
5249 clz_optab = init_optab (CLZ);
5250 ctz_optab = init_optab (CTZ);
5251 popcount_optab = init_optab (POPCOUNT);
5252 parity_optab = init_optab (PARITY);
5253 sqrt_optab = init_optab (SQRT);
5254 floor_optab = init_optab (UNKNOWN);
5255 lfloor_optab = init_optab (UNKNOWN);
5256 ceil_optab = init_optab (UNKNOWN);
5257 lceil_optab = init_optab (UNKNOWN);
5258 round_optab = init_optab (UNKNOWN);
5259 btrunc_optab = init_optab (UNKNOWN);
5260 nearbyint_optab = init_optab (UNKNOWN);
5261 rint_optab = init_optab (UNKNOWN);
5262 lrint_optab = init_optab (UNKNOWN);
5263 sincos_optab = init_optab (UNKNOWN);
5264 sin_optab = init_optab (UNKNOWN);
5265 asin_optab = init_optab (UNKNOWN);
5266 cos_optab = init_optab (UNKNOWN);
5267 acos_optab = init_optab (UNKNOWN);
5268 exp_optab = init_optab (UNKNOWN);
5269 exp10_optab = init_optab (UNKNOWN);
5270 exp2_optab = init_optab (UNKNOWN);
5271 expm1_optab = init_optab (UNKNOWN);
5272 ldexp_optab = init_optab (UNKNOWN);
5273 logb_optab = init_optab (UNKNOWN);
5274 ilogb_optab = init_optab (UNKNOWN);
5275 log_optab = init_optab (UNKNOWN);
5276 log10_optab = init_optab (UNKNOWN);
5277 log2_optab = init_optab (UNKNOWN);
5278 log1p_optab = init_optab (UNKNOWN);
5279 tan_optab = init_optab (UNKNOWN);
5280 atan_optab = init_optab (UNKNOWN);
5281 copysign_optab = init_optab (UNKNOWN);
5283 strlen_optab = init_optab (UNKNOWN);
5284 cbranch_optab = init_optab (UNKNOWN);
5285 cmov_optab = init_optab (UNKNOWN);
5286 cstore_optab = init_optab (UNKNOWN);
5287 push_optab = init_optab (UNKNOWN);
5289 reduc_smax_optab = init_optab (UNKNOWN);
5290 reduc_umax_optab = init_optab (UNKNOWN);
5291 reduc_smin_optab = init_optab (UNKNOWN);
5292 reduc_umin_optab = init_optab (UNKNOWN);
5293 reduc_splus_optab = init_optab (UNKNOWN);
5294 reduc_uplus_optab = init_optab (UNKNOWN);
5296 ssum_widen_optab = init_optab (UNKNOWN);
5297 usum_widen_optab = init_optab (UNKNOWN);
5298 sdot_prod_optab = init_optab (UNKNOWN);
5299 udot_prod_optab = init_optab (UNKNOWN);
5301 vec_extract_optab = init_optab (UNKNOWN);
5302 vec_set_optab = init_optab (UNKNOWN);
5303 vec_init_optab = init_optab (UNKNOWN);
5304 vec_shl_optab = init_optab (UNKNOWN);
5305 vec_shr_optab = init_optab (UNKNOWN);
5306 vec_realign_load_optab = init_optab (UNKNOWN);
5307 movmisalign_optab = init_optab (UNKNOWN);
5309 powi_optab = init_optab (UNKNOWN);
5311 /* Conversions. */
5312 sext_optab = init_convert_optab (SIGN_EXTEND);
5313 zext_optab = init_convert_optab (ZERO_EXTEND);
5314 trunc_optab = init_convert_optab (TRUNCATE);
5315 sfix_optab = init_convert_optab (FIX);
5316 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5317 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5318 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5319 sfloat_optab = init_convert_optab (FLOAT);
5320 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5322 for (i = 0; i < NUM_MACHINE_MODES; i++)
5324 movmem_optab[i] = CODE_FOR_nothing;
5325 cmpstr_optab[i] = CODE_FOR_nothing;
5326 cmpstrn_optab[i] = CODE_FOR_nothing;
5327 cmpmem_optab[i] = CODE_FOR_nothing;
5328 setmem_optab[i] = CODE_FOR_nothing;
5330 sync_add_optab[i] = CODE_FOR_nothing;
5331 sync_sub_optab[i] = CODE_FOR_nothing;
5332 sync_ior_optab[i] = CODE_FOR_nothing;
5333 sync_and_optab[i] = CODE_FOR_nothing;
5334 sync_xor_optab[i] = CODE_FOR_nothing;
5335 sync_nand_optab[i] = CODE_FOR_nothing;
5336 sync_old_add_optab[i] = CODE_FOR_nothing;
5337 sync_old_sub_optab[i] = CODE_FOR_nothing;
5338 sync_old_ior_optab[i] = CODE_FOR_nothing;
5339 sync_old_and_optab[i] = CODE_FOR_nothing;
5340 sync_old_xor_optab[i] = CODE_FOR_nothing;
5341 sync_old_nand_optab[i] = CODE_FOR_nothing;
5342 sync_new_add_optab[i] = CODE_FOR_nothing;
5343 sync_new_sub_optab[i] = CODE_FOR_nothing;
5344 sync_new_ior_optab[i] = CODE_FOR_nothing;
5345 sync_new_and_optab[i] = CODE_FOR_nothing;
5346 sync_new_xor_optab[i] = CODE_FOR_nothing;
5347 sync_new_nand_optab[i] = CODE_FOR_nothing;
5348 sync_compare_and_swap[i] = CODE_FOR_nothing;
5349 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5350 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5351 sync_lock_release[i] = CODE_FOR_nothing;
5353 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5356 /* Fill in the optabs with the insns we support. */
5357 init_all_optabs ();
5359 /* Initialize the optabs with the names of the library functions. */
5360 init_integral_libfuncs (add_optab, "add", '3');
5361 init_floating_libfuncs (add_optab, "add", '3');
5362 init_integral_libfuncs (addv_optab, "addv", '3');
5363 init_floating_libfuncs (addv_optab, "add", '3');
5364 init_integral_libfuncs (sub_optab, "sub", '3');
5365 init_floating_libfuncs (sub_optab, "sub", '3');
5366 init_integral_libfuncs (subv_optab, "subv", '3');
5367 init_floating_libfuncs (subv_optab, "sub", '3');
5368 init_integral_libfuncs (smul_optab, "mul", '3');
5369 init_floating_libfuncs (smul_optab, "mul", '3');
5370 init_integral_libfuncs (smulv_optab, "mulv", '3');
5371 init_floating_libfuncs (smulv_optab, "mul", '3');
5372 init_integral_libfuncs (sdiv_optab, "div", '3');
5373 init_floating_libfuncs (sdiv_optab, "div", '3');
5374 init_integral_libfuncs (sdivv_optab, "divv", '3');
5375 init_integral_libfuncs (udiv_optab, "udiv", '3');
5376 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5377 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5378 init_integral_libfuncs (smod_optab, "mod", '3');
5379 init_integral_libfuncs (umod_optab, "umod", '3');
5380 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5381 init_integral_libfuncs (and_optab, "and", '3');
5382 init_integral_libfuncs (ior_optab, "ior", '3');
5383 init_integral_libfuncs (xor_optab, "xor", '3');
5384 init_integral_libfuncs (ashl_optab, "ashl", '3');
5385 init_integral_libfuncs (ashr_optab, "ashr", '3');
5386 init_integral_libfuncs (lshr_optab, "lshr", '3');
5387 init_integral_libfuncs (smin_optab, "min", '3');
5388 init_floating_libfuncs (smin_optab, "min", '3');
5389 init_integral_libfuncs (smax_optab, "max", '3');
5390 init_floating_libfuncs (smax_optab, "max", '3');
5391 init_integral_libfuncs (umin_optab, "umin", '3');
5392 init_integral_libfuncs (umax_optab, "umax", '3');
5393 init_integral_libfuncs (neg_optab, "neg", '2');
5394 init_floating_libfuncs (neg_optab, "neg", '2');
5395 init_integral_libfuncs (negv_optab, "negv", '2');
5396 init_floating_libfuncs (negv_optab, "neg", '2');
5397 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5398 init_integral_libfuncs (ffs_optab, "ffs", '2');
5399 init_integral_libfuncs (clz_optab, "clz", '2');
5400 init_integral_libfuncs (ctz_optab, "ctz", '2');
5401 init_integral_libfuncs (popcount_optab, "popcount", '2');
5402 init_integral_libfuncs (parity_optab, "parity", '2');
5404 /* Comparison libcalls for integers MUST come in pairs,
5405 signed/unsigned. */
5406 init_integral_libfuncs (cmp_optab, "cmp", '2');
5407 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5408 init_floating_libfuncs (cmp_optab, "cmp", '2');
5410 /* EQ etc are floating point only. */
5411 init_floating_libfuncs (eq_optab, "eq", '2');
5412 init_floating_libfuncs (ne_optab, "ne", '2');
5413 init_floating_libfuncs (gt_optab, "gt", '2');
5414 init_floating_libfuncs (ge_optab, "ge", '2');
5415 init_floating_libfuncs (lt_optab, "lt", '2');
5416 init_floating_libfuncs (le_optab, "le", '2');
5417 init_floating_libfuncs (unord_optab, "unord", '2');
5419 init_floating_libfuncs (powi_optab, "powi", '2');
5421 /* Conversions. */
5422 init_interclass_conv_libfuncs (sfloat_optab, "float",
5423 MODE_INT, MODE_FLOAT);
5424 init_interclass_conv_libfuncs (sfloat_optab, "float",
5425 MODE_INT, MODE_DECIMAL_FLOAT);
5426 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5427 MODE_INT, MODE_FLOAT);
5428 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5429 MODE_INT, MODE_DECIMAL_FLOAT);
5430 init_interclass_conv_libfuncs (sfix_optab, "fix",
5431 MODE_FLOAT, MODE_INT);
5432 init_interclass_conv_libfuncs (sfix_optab, "fix",
5433 MODE_DECIMAL_FLOAT, MODE_INT);
5434 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5435 MODE_FLOAT, MODE_INT);
5436 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5437 MODE_DECIMAL_FLOAT, MODE_INT);
5438 init_interclass_conv_libfuncs (ufloat_optab, "floatuns",
5439 MODE_INT, MODE_DECIMAL_FLOAT);
5441 /* sext_optab is also used for FLOAT_EXTEND. */
5442 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5443 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true);
5444 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5445 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5446 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5447 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false);
5448 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5449 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5451 /* Use cabs for double complex abs, since systems generally have cabs.
5452 Don't define any libcall for float complex, so that cabs will be used. */
5453 if (complex_double_type_node)
5454 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5455 = init_one_libfunc ("cabs");
5457 /* The ffs function operates on `int'. */
5458 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5459 = init_one_libfunc ("ffs");
5461 abort_libfunc = init_one_libfunc ("abort");
5462 memcpy_libfunc = init_one_libfunc ("memcpy");
5463 memmove_libfunc = init_one_libfunc ("memmove");
5464 memcmp_libfunc = init_one_libfunc ("memcmp");
5465 memset_libfunc = init_one_libfunc ("memset");
5466 setbits_libfunc = init_one_libfunc ("__setbits");
5468 #ifndef DONT_USE_BUILTIN_SETJMP
5469 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5470 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5471 #else
5472 setjmp_libfunc = init_one_libfunc ("setjmp");
5473 longjmp_libfunc = init_one_libfunc ("longjmp");
5474 #endif
5475 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5476 unwind_sjlj_unregister_libfunc
5477 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5479 /* For function entry/exit instrumentation. */
5480 profile_function_entry_libfunc
5481 = init_one_libfunc ("__cyg_profile_func_enter");
5482 profile_function_exit_libfunc
5483 = init_one_libfunc ("__cyg_profile_func_exit");
5485 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5487 if (HAVE_conditional_trap)
5488 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5490 /* Allow the target to add more libcalls or rename some, etc. */
5491 targetm.init_libfuncs ();
5494 #ifdef DEBUG
5496 /* Print information about the current contents of the optabs on
5497 STDERR. */
5499 static void
5500 debug_optab_libfuncs (void)
5502 int i;
5503 int j;
5504 int k;
5506 /* Dump the arithmetic optabs. */
5507 for (i = 0; i != (int) OTI_MAX; i++)
5508 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5510 optab o;
5511 struct optab_handlers *h;
5513 o = optab_table[i];
5514 h = &o->handlers[j];
5515 if (h->libfunc)
5517 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5518 fprintf (stderr, "%s\t%s:\t%s\n",
5519 GET_RTX_NAME (o->code),
5520 GET_MODE_NAME (j),
5521 XSTR (h->libfunc, 0));
5525 /* Dump the conversion optabs. */
5526 for (i = 0; i < (int) COI_MAX; ++i)
5527 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5528 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5530 convert_optab o;
5531 struct optab_handlers *h;
5533 o = &convert_optab_table[i];
5534 h = &o->handlers[j][k];
5535 if (h->libfunc)
5537 gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5538 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5539 GET_RTX_NAME (o->code),
5540 GET_MODE_NAME (j),
5541 GET_MODE_NAME (k),
5542 XSTR (h->libfunc, 0));
5547 #endif /* DEBUG */
5550 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5551 CODE. Return 0 on failure. */
5554 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5555 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5557 enum machine_mode mode = GET_MODE (op1);
5558 enum insn_code icode;
5559 rtx insn;
5561 if (!HAVE_conditional_trap)
5562 return 0;
5564 if (mode == VOIDmode)
5565 return 0;
5567 icode = cmp_optab->handlers[(int) mode].insn_code;
5568 if (icode == CODE_FOR_nothing)
5569 return 0;
5571 start_sequence ();
5572 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5573 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5574 if (!op1 || !op2)
5576 end_sequence ();
5577 return 0;
5579 emit_insn (GEN_FCN (icode) (op1, op2));
5581 PUT_CODE (trap_rtx, code);
5582 gcc_assert (HAVE_conditional_trap);
5583 insn = gen_conditional_trap (trap_rtx, tcode);
5584 if (insn)
5586 emit_insn (insn);
5587 insn = get_insns ();
5589 end_sequence ();
5591 return insn;
5594 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5595 or unsigned operation code. */
5597 static enum rtx_code
5598 get_rtx_code (enum tree_code tcode, bool unsignedp)
5600 enum rtx_code code;
5601 switch (tcode)
5603 case EQ_EXPR:
5604 code = EQ;
5605 break;
5606 case NE_EXPR:
5607 code = NE;
5608 break;
5609 case LT_EXPR:
5610 code = unsignedp ? LTU : LT;
5611 break;
5612 case LE_EXPR:
5613 code = unsignedp ? LEU : LE;
5614 break;
5615 case GT_EXPR:
5616 code = unsignedp ? GTU : GT;
5617 break;
5618 case GE_EXPR:
5619 code = unsignedp ? GEU : GE;
5620 break;
5622 case UNORDERED_EXPR:
5623 code = UNORDERED;
5624 break;
5625 case ORDERED_EXPR:
5626 code = ORDERED;
5627 break;
5628 case UNLT_EXPR:
5629 code = UNLT;
5630 break;
5631 case UNLE_EXPR:
5632 code = UNLE;
5633 break;
5634 case UNGT_EXPR:
5635 code = UNGT;
5636 break;
5637 case UNGE_EXPR:
5638 code = UNGE;
5639 break;
5640 case UNEQ_EXPR:
5641 code = UNEQ;
5642 break;
5643 case LTGT_EXPR:
5644 code = LTGT;
5645 break;
5647 default:
5648 gcc_unreachable ();
5650 return code;
5653 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5654 unsigned operators. Do not generate a compare instruction. */
5656 static rtx
5657 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5659 enum rtx_code rcode;
5660 tree t_op0, t_op1;
5661 rtx rtx_op0, rtx_op1;
5663 /* This is unlikely. While generating VEC_COND_EXPR, the auto-vectorizer
5664 ensures that the condition is a relational operation. */
5665 gcc_assert (COMPARISON_CLASS_P (cond));
5667 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5668 t_op0 = TREE_OPERAND (cond, 0);
5669 t_op1 = TREE_OPERAND (cond, 1);
5671 /* Expand operands. */
5672 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5673 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
5675 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5676 && GET_MODE (rtx_op0) != VOIDmode)
5677 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5679 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5680 && GET_MODE (rtx_op1) != VOIDmode)
5681 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5683 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5686 /* Return insn code for VEC_COND_EXPR EXPR. */
5688 static inline enum insn_code
5689 get_vcond_icode (tree expr, enum machine_mode mode)
5691 enum insn_code icode = CODE_FOR_nothing;
5693 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5694 icode = vcondu_gen_code[mode];
5695 else
5696 icode = vcond_gen_code[mode];
5697 return icode;
5700 /* Return TRUE iff appropriate vector insns are available
5701 for vector cond expr EXPR in VMODE mode. */
5703 bool
5704 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5706 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5707 return false;
5708 return true;
5711 /* Generate insns for VEC_COND_EXPR. */
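/* Reminder of the tree-level semantics: VEC_COND_EXPR <cond, v1, v2>
   selects, element by element, the corresponding element of V1 where
   COND is true and of V2 where it is false; the expansion below relies
   on the target's vcond/vcondu named patterns for the vector mode.  */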
5714 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5716 enum insn_code icode;
5717 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5718 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5719 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5721 icode = get_vcond_icode (vec_cond_expr, mode);
5722 if (icode == CODE_FOR_nothing)
5723 return 0;
5725 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5726 target = gen_reg_rtx (mode);
5728 /* Get comparison rtx. First expand both cond expr operands. */
5729 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5730 unsignedp, icode);
5731 cc_op0 = XEXP (comparison, 0);
5732 cc_op1 = XEXP (comparison, 1);
5733 /* Expand both operands and force them into registers, if required. */
5734 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5735 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5736 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5737 && mode != VOIDmode)
5738 rtx_op1 = force_reg (mode, rtx_op1);
5740 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5741 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5742 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5743 && mode != VOIDmode)
5744 rtx_op2 = force_reg (mode, rtx_op2);
5746 /* Emit instruction! */
5747 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5748 comparison, cc_op0, cc_op1));
5750 return target;
5754 /* This is an internal subroutine of the other compare_and_swap expanders.
5755 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5756 operation. TARGET is an optional place to store the value result of
5757 the operation. ICODE is the particular instruction to expand. Return
5758 the result of the operation. */
5760 static rtx
5761 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5762 rtx target, enum insn_code icode)
5764 enum machine_mode mode = GET_MODE (mem);
5765 rtx insn;
5767 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5768 target = gen_reg_rtx (mode);
5770 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5771 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5772 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5773 old_val = force_reg (mode, old_val);
5775 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5776 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5777 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5778 new_val = force_reg (mode, new_val);
5780 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5781 if (insn == NULL_RTX)
5782 return NULL_RTX;
5783 emit_insn (insn);
5785 return target;
5788 /* Expand a compare-and-swap operation and return its value. */
5791 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5793 enum machine_mode mode = GET_MODE (mem);
5794 enum insn_code icode = sync_compare_and_swap[mode];
5796 if (icode == CODE_FOR_nothing)
5797 return NULL_RTX;
5799 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
5802 /* Expand a compare-and-swap operation and store true into the result if
5803 the operation was successful and false otherwise. Return the result.
5804 Unlike other routines, TARGET is not optional. */
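/* Informally, this is the expander behind builtins of the
   __sync_bool_compare_and_swap flavour, e.g.

     if (__sync_bool_compare_and_swap (&lock, 0, 1))
       enter_critical_section ();

   where only the success flag, not the old value, is of interest
   (illustrative example; the builtin-to-expander glue is outside
   this file).  */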
5807 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5809 enum machine_mode mode = GET_MODE (mem);
5810 enum insn_code icode;
5811 rtx subtarget, label0, label1;
5813 /* If the target supports a compare-and-swap pattern that simultaneously
5814 sets some flag for success, then use it. Otherwise use the regular
5815 compare-and-swap and follow that immediately with a compare insn. */
5816 icode = sync_compare_and_swap_cc[mode];
5817 switch (icode)
5819 default:
5820 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5821 NULL_RTX, icode);
5822 if (subtarget != NULL_RTX)
5823 break;
5825 /* FALLTHRU */
5826 case CODE_FOR_nothing:
5827 icode = sync_compare_and_swap[mode];
5828 if (icode == CODE_FOR_nothing)
5829 return NULL_RTX;
5831 /* Ensure that if old_val == mem, we're not comparing
5832 against an old value. */
5833 if (MEM_P (old_val))
5834 old_val = force_reg (mode, old_val);
5836 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5837 NULL_RTX, icode);
5838 if (subtarget == NULL_RTX)
5839 return NULL_RTX;
5841 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5844 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5845 setcc instruction from the beginning. We don't work too hard here,
5846 but it's nice to not be stupid about initial code gen either. */
5847 if (STORE_FLAG_VALUE == 1)
5849 icode = setcc_gen_code[EQ];
5850 if (icode != CODE_FOR_nothing)
5852 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5853 rtx insn;
5855 subtarget = target;
5856 if (!insn_data[icode].operand[0].predicate (target, cmode))
5857 subtarget = gen_reg_rtx (cmode);
5859 insn = GEN_FCN (icode) (subtarget);
5860 if (insn)
5862 emit_insn (insn);
5863 if (GET_MODE (target) != GET_MODE (subtarget))
5865 convert_move (target, subtarget, 1);
5866 subtarget = target;
5868 return subtarget;
5873 /* Without an appropriate setcc instruction, use a set of branches to
5874 get 1 and 0 stored into target. Presumably if the target has a
5875 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5877 label0 = gen_label_rtx ();
5878 label1 = gen_label_rtx ();
5880 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
5881 emit_move_insn (target, const0_rtx);
5882 emit_jump_insn (gen_jump (label1));
5883 emit_barrier ();
5884 emit_label (label0);
5885 emit_move_insn (target, const1_rtx);
5886 emit_label (label1);
5888 return target;
5891 /* This is a helper function for the other atomic operations. This function
5892 emits a loop that contains SEQ that iterates until a compare-and-swap
5893 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5894 a set of instructions that takes a value from OLD_REG as an input and
5895 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5896 set to the current contents of MEM. After SEQ, a compare-and-swap will
5897 attempt to update MEM with NEW_REG. The function returns true when the
5898 loop was generated successfully. */
5900 static bool
5901 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5903 enum machine_mode mode = GET_MODE (mem);
5904 enum insn_code icode;
5905 rtx label, cmp_reg, subtarget;
5907 /* The loop we want to generate looks like
5909 cmp_reg = mem;
5910 label:
5911 old_reg = cmp_reg;
5912 seq;
5913 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
5914 if (cmp_reg != old_reg)
5915 goto label;
5917 Note that we only do the plain load from memory once. Subsequent
5918 iterations use the value loaded by the compare-and-swap pattern. */
5920 label = gen_label_rtx ();
5921 cmp_reg = gen_reg_rtx (mode);
5923 emit_move_insn (cmp_reg, mem);
5924 emit_label (label);
5925 emit_move_insn (old_reg, cmp_reg);
5926 if (seq)
5927 emit_insn (seq);
5929 /* If the target supports a compare-and-swap pattern that simultaneously
5930 sets some flag for success, then use it. Otherwise use the regular
5931 compare-and-swap and follow that immediately with a compare insn. */
5932 icode = sync_compare_and_swap_cc[mode];
5933 switch (icode)
5935 default:
5936 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5937 cmp_reg, icode);
5938 if (subtarget != NULL_RTX)
5940 gcc_assert (subtarget == cmp_reg);
5941 break;
5944 /* FALLTHRU */
5945 case CODE_FOR_nothing:
5946 icode = sync_compare_and_swap[mode];
5947 if (icode == CODE_FOR_nothing)
5948 return false;
5950 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5951 cmp_reg, icode);
5952 if (subtarget == NULL_RTX)
5953 return false;
5954 if (subtarget != cmp_reg)
5955 emit_move_insn (cmp_reg, subtarget);
5957 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
5960 /* ??? Mark this jump predicted not taken? */
5961 emit_jump_insn (bcc_gen_fctn[NE] (label));
5963 return true;
5966 /* This function generates the atomic operation MEM CODE= VAL. In this
5967 case, we do not care about any resulting value. Returns NULL if we
5968 cannot generate the operation. */
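/* Informally this is the "result ignored" case: a statement such as

     __sync_fetch_and_add (&counter, 1);

   whose value is never used can be expanded here using the plain
   sync_add pattern, with no old or new value produced (illustrative
   example; the builtin dispatch itself is outside this file).  */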
5971 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
5973 enum machine_mode mode = GET_MODE (mem);
5974 enum insn_code icode;
5975 rtx insn;
5977 /* Look to see if the target supports the operation directly. */
5978 switch (code)
5980 case PLUS:
5981 icode = sync_add_optab[mode];
5982 break;
5983 case IOR:
5984 icode = sync_ior_optab[mode];
5985 break;
5986 case XOR:
5987 icode = sync_xor_optab[mode];
5988 break;
5989 case AND:
5990 icode = sync_and_optab[mode];
5991 break;
5992 case NOT:
5993 icode = sync_nand_optab[mode];
5994 break;
5996 case MINUS:
5997 icode = sync_sub_optab[mode];
5998 if (icode == CODE_FOR_nothing)
6000 icode = sync_add_optab[mode];
6001 if (icode != CODE_FOR_nothing)
6003 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6004 code = PLUS;
6007 break;
6009 default:
6010 gcc_unreachable ();
6013 /* Generate the direct operation, if present. */
6014 if (icode != CODE_FOR_nothing)
6016 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6017 val = convert_modes (mode, GET_MODE (val), val, 1);
6018 if (!insn_data[icode].operand[1].predicate (val, mode))
6019 val = force_reg (mode, val);
6021 insn = GEN_FCN (icode) (mem, val);
6022 if (insn)
6024 emit_insn (insn);
6025 return const0_rtx;
6029 /* Failing that, generate a compare-and-swap loop in which we perform the
6030 operation with normal arithmetic instructions. */
6031 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6033 rtx t0 = gen_reg_rtx (mode), t1;
6035 start_sequence ();
6037 t1 = t0;
6038 if (code == NOT)
6040 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6041 code = AND;
6043 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6044 true, OPTAB_LIB_WIDEN);
6046 insn = get_insns ();
6047 end_sequence ();
6049 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6050 return const0_rtx;
6053 return NULL_RTX;
6056 /* This function generates the atomic operation MEM CODE= VAL. In this
6057 case, we do care about the resulting value: if AFTER is true then
6058 return the value MEM holds after the operation, if AFTER is false
6059 then return the value MEM holds before the operation. TARGET is an
6060 optional place for the result value to be stored. */
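/* Illustratively, AFTER distinguishes the two builtin flavours:

     old = __sync_fetch_and_add (&x, n);    AFTER is false
     new = __sync_add_and_fetch (&x, n);    AFTER is true

   (example only; the mapping from builtins to this expander is done
   outside this file).  */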
6063 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
6064 bool after, rtx target)
6066 enum machine_mode mode = GET_MODE (mem);
6067 enum insn_code old_code, new_code, icode;
6068 bool compensate;
6069 rtx insn;
6071 /* Look to see if the target supports the operation directly. */
6072 switch (code)
6074 case PLUS:
6075 old_code = sync_old_add_optab[mode];
6076 new_code = sync_new_add_optab[mode];
6077 break;
6078 case IOR:
6079 old_code = sync_old_ior_optab[mode];
6080 new_code = sync_new_ior_optab[mode];
6081 break;
6082 case XOR:
6083 old_code = sync_old_xor_optab[mode];
6084 new_code = sync_new_xor_optab[mode];
6085 break;
6086 case AND:
6087 old_code = sync_old_and_optab[mode];
6088 new_code = sync_new_and_optab[mode];
6089 break;
6090 case NOT:
6091 old_code = sync_old_nand_optab[mode];
6092 new_code = sync_new_nand_optab[mode];
6093 break;
6095 case MINUS:
6096 old_code = sync_old_sub_optab[mode];
6097 new_code = sync_new_sub_optab[mode];
6098 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
6100 old_code = sync_old_add_optab[mode];
6101 new_code = sync_new_add_optab[mode];
6102 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
6104 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6105 code = PLUS;
6108 break;
6110 default:
6111 gcc_unreachable ();
6114 /* If the target supports the proper new/old operation, great. But if
6115 we only support the opposite old/new operation, check to see if we
6116 can compensate. If the old value is supported, we can always
6117 perform the operation again with normal arithmetic. If only the
6118 new value is supported, we can handle this only when the
6119 operation is reversible. */
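/* Worked example of the compensation: if the caller wants the old value
   of a PLUS but only the "new value" pattern exists, we can emit
   new = mem + val and then recover old = new - val afterwards; that is
   why only the reversible codes (PLUS, MINUS, XOR) qualify in that
   direction.  */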
6120 compensate = false;
6121 if (after)
6123 icode = new_code;
6124 if (icode == CODE_FOR_nothing)
6126 icode = old_code;
6127 if (icode != CODE_FOR_nothing)
6128 compensate = true;
6131 else
6133 icode = old_code;
6134 if (icode == CODE_FOR_nothing
6135 && (code == PLUS || code == MINUS || code == XOR))
6137 icode = new_code;
6138 if (icode != CODE_FOR_nothing)
6139 compensate = true;
6143 /* If we found something supported, great. */
6144 if (icode != CODE_FOR_nothing)
6146 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6147 target = gen_reg_rtx (mode);
6149 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6150 val = convert_modes (mode, GET_MODE (val), val, 1);
6151 if (!insn_data[icode].operand[2].predicate (val, mode))
6152 val = force_reg (mode, val);
6154 insn = GEN_FCN (icode) (target, mem, val);
6155 if (insn)
6157 emit_insn (insn);
6159 /* If we need to compensate for using an operation with the
6160 wrong return value, do so now. */
6161 if (compensate)
6163 if (!after)
6165 if (code == PLUS)
6166 code = MINUS;
6167 else if (code == MINUS)
6168 code = PLUS;
6171 if (code == NOT)
6172 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
6173 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
6174 true, OPTAB_LIB_WIDEN);
6177 return target;
6181 /* Failing that, generate a compare-and-swap loop in which we perform the
6182 operation with normal arithmetic instructions. */
6183 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6185 rtx t0 = gen_reg_rtx (mode), t1;
6187 if (!target || !register_operand (target, mode))
6188 target = gen_reg_rtx (mode);
6190 start_sequence ();
6192 if (!after)
6193 emit_move_insn (target, t0);
6194 t1 = t0;
6195 if (code == NOT)
6197 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6198 code = AND;
6200 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6201 true, OPTAB_LIB_WIDEN);
6202 if (after)
6203 emit_move_insn (target, t1);
6205 insn = get_insns ();
6206 end_sequence ();
6208 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6209 return target;
6212 return NULL_RTX;
6215 /* This function expands a test-and-set operation. Ideally we atomically
6216 store VAL in MEM and return the previous value in MEM. Some targets
6217 may not support this operation and only support VAL with the constant 1;
6218 in this case the return value will be 0/1, but the exact value
6219 stored in MEM is target-defined. TARGET is an optional place to stick
6220 the return value. */
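/* A typical use is a spin-lock acquire, e.g.

     while (__sync_lock_test_and_set (&lock, 1))
       ;   spin until the previous value was 0

   which only needs the 0/1 behaviour described above (illustrative
   example; the builtin handling is outside this file).  */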
6223 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6225 enum machine_mode mode = GET_MODE (mem);
6226 enum insn_code icode;
6227 rtx insn;
6229 /* If the target supports the test-and-set directly, great. */
6230 icode = sync_lock_test_and_set[mode];
6231 if (icode != CODE_FOR_nothing)
6233 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6234 target = gen_reg_rtx (mode);
6236 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6237 val = convert_modes (mode, GET_MODE (val), val, 1);
6238 if (!insn_data[icode].operand[2].predicate (val, mode))
6239 val = force_reg (mode, val);
6241 insn = GEN_FCN (icode) (target, mem, val);
6242 if (insn)
6244 emit_insn (insn);
6245 return target;
6249 /* Otherwise, use a compare-and-swap loop for the exchange. */
6250 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6252 if (!target || !register_operand (target, mode))
6253 target = gen_reg_rtx (mode);
6254 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6255 val = convert_modes (mode, GET_MODE (val), val, 1);
6256 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6257 return target;
6260 return NULL_RTX;
6263 #include "gt-optabs.h"