PR c++/53989
[official-gcc.git] / gcc / optabs.c
blobe1ecc657484a835fe0a34aa8f925d7011154bbda
1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011, 2012 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "diagnostic-core.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
32 #include "rtl.h"
33 #include "tree.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "function.h"
37 #include "except.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "libfuncs.h"
41 #include "recog.h"
42 #include "reload.h"
43 #include "ggc.h"
44 #include "basic-block.h"
45 #include "target.h"
47 struct target_optabs default_target_optabs;
48 struct target_libfuncs default_target_libfuncs;
49 #if SWITCHABLE_TARGET
50 struct target_optabs *this_target_optabs = &default_target_optabs;
51 struct target_libfuncs *this_target_libfuncs = &default_target_libfuncs;
52 #endif
54 #define libfunc_hash \
55 (this_target_libfuncs->x_libfunc_hash)
57 /* Contains the optab used for each rtx code. */
58 optab code_to_optab[NUM_RTX_CODE + 1];
60 static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
61 enum machine_mode *);
62 static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);
63 static void emit_libcall_block_1 (rtx, rtx, rtx, rtx, bool);
65 /* Debug facility for use in GDB. */
66 void debug_optab_libfuncs (void);
68 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
69 #if ENABLE_DECIMAL_BID_FORMAT
70 #define DECIMAL_PREFIX "bid_"
71 #else
72 #define DECIMAL_PREFIX "dpd_"
73 #endif
75 /* Used for libfunc_hash. */
77 static hashval_t
78 hash_libfunc (const void *p)
80 const struct libfunc_entry *const e = (const struct libfunc_entry *) p;
82 return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
83 ^ e->optab);
86 /* Used for libfunc_hash. */
88 static int
89 eq_libfunc (const void *p, const void *q)
91 const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
92 const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;
94 return (e1->optab == e2->optab
95 && e1->mode1 == e2->mode1
96 && e1->mode2 == e2->mode2);
99 /* Return libfunc corresponding operation defined by OPTAB converting
100 from MODE2 to MODE1. Trigger lazy initialization if needed, return NULL
101 if no libfunc is available. */
103 convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
104 enum machine_mode mode2)
106 struct libfunc_entry e;
107 struct libfunc_entry **slot;
109 e.optab = (size_t) (optab - &convert_optab_table[0]);
110 e.mode1 = mode1;
111 e.mode2 = mode2;
112 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
113 if (!slot)
115 if (optab->libcall_gen)
117 optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
118 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
119 if (slot)
120 return (*slot)->libfunc;
121 else
122 return NULL;
124 return NULL;
126 return (*slot)->libfunc;
129 /* Return libfunc corresponding operation defined by OPTAB in MODE.
130 Trigger lazy initialization if needed, return NULL if no libfunc is
131 available. */
133 optab_libfunc (optab optab, enum machine_mode mode)
135 struct libfunc_entry e;
136 struct libfunc_entry **slot;
138 e.optab = (size_t) (optab - &optab_table[0]);
139 e.mode1 = mode;
140 e.mode2 = VOIDmode;
141 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
142 if (!slot)
144 if (optab->libcall_gen)
146 optab->libcall_gen (optab, optab->libcall_basename,
147 optab->libcall_suffix, mode);
148 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
149 &e, NO_INSERT);
150 if (slot)
151 return (*slot)->libfunc;
152 else
153 return NULL;
155 return NULL;
157 return (*slot)->libfunc;
161 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
162 the result of operation CODE applied to OP0 (and OP1 if it is a binary
163 operation).
165 If the last insn does not set TARGET, don't do anything, but return 1.
167 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
168 don't add the REG_EQUAL note but return 0. Our caller can then try
169 again, ensuring that TARGET is not one of the operands. */
171 static int
172 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
174 rtx last_insn, insn, set;
175 rtx note;
177 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
179 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
180 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
181 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
182 && GET_RTX_CLASS (code) != RTX_COMPARE
183 && GET_RTX_CLASS (code) != RTX_UNARY)
184 return 1;
186 if (GET_CODE (target) == ZERO_EXTRACT)
187 return 1;
189 for (last_insn = insns;
190 NEXT_INSN (last_insn) != NULL_RTX;
191 last_insn = NEXT_INSN (last_insn))
194 set = single_set (last_insn);
195 if (set == NULL_RTX)
196 return 1;
198 if (! rtx_equal_p (SET_DEST (set), target)
199 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
200 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
201 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
202 return 1;
204 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
205 besides the last insn. */
206 if (reg_overlap_mentioned_p (target, op0)
207 || (op1 && reg_overlap_mentioned_p (target, op1)))
209 insn = PREV_INSN (last_insn);
210 while (insn != NULL_RTX)
212 if (reg_set_p (target, insn))
213 return 0;
215 insn = PREV_INSN (insn);
219 if (GET_RTX_CLASS (code) == RTX_UNARY)
220 switch (code)
222 case FFS:
223 case CLZ:
224 case CTZ:
225 case CLRSB:
226 case POPCOUNT:
227 case PARITY:
228 case BSWAP:
229 if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
231 note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
232 if (GET_MODE_SIZE (GET_MODE (op0))
233 > GET_MODE_SIZE (GET_MODE (target)))
234 note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
235 note, GET_MODE (op0));
236 else
237 note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
238 note, GET_MODE (op0));
239 break;
241 /* FALLTHRU */
242 default:
243 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
244 break;
246 else
247 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
249 set_unique_reg_note (last_insn, REG_EQUAL, note);
251 return 1;
254 /* Given two input operands, OP0 and OP1, determine what the correct from_mode
255 for a widening operation would be. In most cases this would be OP0, but if
256 that's a constant it'll be VOIDmode, which isn't useful. */
258 static enum machine_mode
259 widened_mode (enum machine_mode to_mode, rtx op0, rtx op1)
261 enum machine_mode m0 = GET_MODE (op0);
262 enum machine_mode m1 = GET_MODE (op1);
263 enum machine_mode result;
265 if (m0 == VOIDmode && m1 == VOIDmode)
266 return to_mode;
267 else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
268 result = m1;
269 else
270 result = m0;
272 if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
273 return to_mode;
275 return result;
278 /* Find a widening optab even if it doesn't widen as much as we want.
279 E.g. if from_mode is HImode, and to_mode is DImode, and there is no
280 direct HI->SI insn, then return SI->DI, if that exists.
281 If PERMIT_NON_WIDENING is non-zero then this can be used with
282 non-widening optabs also. */
284 enum insn_code
285 find_widening_optab_handler_and_mode (optab op, enum machine_mode to_mode,
286 enum machine_mode from_mode,
287 int permit_non_widening,
288 enum machine_mode *found_mode)
290 for (; (permit_non_widening || from_mode != to_mode)
291 && GET_MODE_SIZE (from_mode) <= GET_MODE_SIZE (to_mode)
292 && from_mode != VOIDmode;
293 from_mode = GET_MODE_WIDER_MODE (from_mode))
295 enum insn_code handler = widening_optab_handler (op, to_mode,
296 from_mode);
298 if (handler != CODE_FOR_nothing)
300 if (found_mode)
301 *found_mode = from_mode;
302 return handler;
306 return CODE_FOR_nothing;
309 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
310 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
311 not actually do a sign-extend or zero-extend, but can leave the
312 higher-order bits of the result rtx undefined, for example, in the case
313 of logical operations, but not right shifts. */
315 static rtx
316 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
317 int unsignedp, int no_extend)
319 rtx result;
321 /* If we don't have to extend and this is a constant, return it. */
322 if (no_extend && GET_MODE (op) == VOIDmode)
323 return op;
325 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
326 extend since it will be more efficient to do so unless the signedness of
327 a promoted object differs from our extension. */
328 if (! no_extend
329 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
330 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
331 return convert_modes (mode, oldmode, op, unsignedp);
333 /* If MODE is no wider than a single word, we return a paradoxical
334 SUBREG. */
335 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
336 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
338 /* Otherwise, get an object of MODE, clobber it, and set the low-order
339 part to OP. */
341 result = gen_reg_rtx (mode);
342 emit_clobber (result);
343 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
344 return result;
347 /* Return the optab used for computing the operation given by the tree code,
348 CODE and the tree EXP. This function is not always usable (for example, it
349 cannot give complete results for multiplication or division) but probably
350 ought to be relied on more widely throughout the expander. */
351 optab
352 optab_for_tree_code (enum tree_code code, const_tree type,
353 enum optab_subtype subtype)
355 bool trapv;
356 switch (code)
358 case BIT_AND_EXPR:
359 return and_optab;
361 case BIT_IOR_EXPR:
362 return ior_optab;
364 case BIT_NOT_EXPR:
365 return one_cmpl_optab;
367 case BIT_XOR_EXPR:
368 return xor_optab;
370 case MULT_HIGHPART_EXPR:
371 return TYPE_UNSIGNED (type) ? umul_highpart_optab : smul_highpart_optab;
373 case TRUNC_MOD_EXPR:
374 case CEIL_MOD_EXPR:
375 case FLOOR_MOD_EXPR:
376 case ROUND_MOD_EXPR:
377 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
379 case RDIV_EXPR:
380 case TRUNC_DIV_EXPR:
381 case CEIL_DIV_EXPR:
382 case FLOOR_DIV_EXPR:
383 case ROUND_DIV_EXPR:
384 case EXACT_DIV_EXPR:
385 if (TYPE_SATURATING(type))
386 return TYPE_UNSIGNED(type) ? usdiv_optab : ssdiv_optab;
387 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
389 case LSHIFT_EXPR:
390 if (TREE_CODE (type) == VECTOR_TYPE)
392 if (subtype == optab_vector)
393 return TYPE_SATURATING (type) ? NULL : vashl_optab;
395 gcc_assert (subtype == optab_scalar);
397 if (TYPE_SATURATING(type))
398 return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
399 return ashl_optab;
401 case RSHIFT_EXPR:
402 if (TREE_CODE (type) == VECTOR_TYPE)
404 if (subtype == optab_vector)
405 return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;
407 gcc_assert (subtype == optab_scalar);
409 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
411 case LROTATE_EXPR:
412 if (TREE_CODE (type) == VECTOR_TYPE)
414 if (subtype == optab_vector)
415 return vrotl_optab;
417 gcc_assert (subtype == optab_scalar);
419 return rotl_optab;
421 case RROTATE_EXPR:
422 if (TREE_CODE (type) == VECTOR_TYPE)
424 if (subtype == optab_vector)
425 return vrotr_optab;
427 gcc_assert (subtype == optab_scalar);
429 return rotr_optab;
431 case MAX_EXPR:
432 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
434 case MIN_EXPR:
435 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
437 case REALIGN_LOAD_EXPR:
438 return vec_realign_load_optab;
440 case WIDEN_SUM_EXPR:
441 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
443 case DOT_PROD_EXPR:
444 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
446 case WIDEN_MULT_PLUS_EXPR:
447 return (TYPE_UNSIGNED (type)
448 ? (TYPE_SATURATING (type)
449 ? usmadd_widen_optab : umadd_widen_optab)
450 : (TYPE_SATURATING (type)
451 ? ssmadd_widen_optab : smadd_widen_optab));
453 case WIDEN_MULT_MINUS_EXPR:
454 return (TYPE_UNSIGNED (type)
455 ? (TYPE_SATURATING (type)
456 ? usmsub_widen_optab : umsub_widen_optab)
457 : (TYPE_SATURATING (type)
458 ? ssmsub_widen_optab : smsub_widen_optab));
460 case FMA_EXPR:
461 return fma_optab;
463 case REDUC_MAX_EXPR:
464 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
466 case REDUC_MIN_EXPR:
467 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
469 case REDUC_PLUS_EXPR:
470 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
472 case VEC_LSHIFT_EXPR:
473 return vec_shl_optab;
475 case VEC_RSHIFT_EXPR:
476 return vec_shr_optab;
478 case VEC_WIDEN_MULT_HI_EXPR:
479 return TYPE_UNSIGNED (type) ?
480 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
482 case VEC_WIDEN_MULT_LO_EXPR:
483 return TYPE_UNSIGNED (type) ?
484 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
486 case VEC_WIDEN_MULT_EVEN_EXPR:
487 return TYPE_UNSIGNED (type) ?
488 vec_widen_umult_even_optab : vec_widen_smult_even_optab;
490 case VEC_WIDEN_MULT_ODD_EXPR:
491 return TYPE_UNSIGNED (type) ?
492 vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
494 case VEC_WIDEN_LSHIFT_HI_EXPR:
495 return TYPE_UNSIGNED (type) ?
496 vec_widen_ushiftl_hi_optab : vec_widen_sshiftl_hi_optab;
498 case VEC_WIDEN_LSHIFT_LO_EXPR:
499 return TYPE_UNSIGNED (type) ?
500 vec_widen_ushiftl_lo_optab : vec_widen_sshiftl_lo_optab;
502 case VEC_UNPACK_HI_EXPR:
503 return TYPE_UNSIGNED (type) ?
504 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
506 case VEC_UNPACK_LO_EXPR:
507 return TYPE_UNSIGNED (type) ?
508 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
510 case VEC_UNPACK_FLOAT_HI_EXPR:
511 /* The signedness is determined from input operand. */
512 return TYPE_UNSIGNED (type) ?
513 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
515 case VEC_UNPACK_FLOAT_LO_EXPR:
516 /* The signedness is determined from input operand. */
517 return TYPE_UNSIGNED (type) ?
518 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
520 case VEC_PACK_TRUNC_EXPR:
521 return vec_pack_trunc_optab;
523 case VEC_PACK_SAT_EXPR:
524 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
526 case VEC_PACK_FIX_TRUNC_EXPR:
527 /* The signedness is determined from output operand. */
528 return TYPE_UNSIGNED (type) ?
529 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
531 default:
532 break;
535 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
536 switch (code)
538 case POINTER_PLUS_EXPR:
539 case PLUS_EXPR:
540 if (TYPE_SATURATING(type))
541 return TYPE_UNSIGNED(type) ? usadd_optab : ssadd_optab;
542 return trapv ? addv_optab : add_optab;
544 case MINUS_EXPR:
545 if (TYPE_SATURATING(type))
546 return TYPE_UNSIGNED(type) ? ussub_optab : sssub_optab;
547 return trapv ? subv_optab : sub_optab;
549 case MULT_EXPR:
550 if (TYPE_SATURATING(type))
551 return TYPE_UNSIGNED(type) ? usmul_optab : ssmul_optab;
552 return trapv ? smulv_optab : smul_optab;
554 case NEGATE_EXPR:
555 if (TYPE_SATURATING(type))
556 return TYPE_UNSIGNED(type) ? usneg_optab : ssneg_optab;
557 return trapv ? negv_optab : neg_optab;
559 case ABS_EXPR:
560 return trapv ? absv_optab : abs_optab;
562 default:
563 return NULL;
568 /* Expand vector widening operations.
570 There are two different classes of operations handled here:
571 1) Operations whose result is wider than all the arguments to the operation.
572 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
573 In this case OP0 and optionally OP1 would be initialized,
574 but WIDE_OP wouldn't (not relevant for this case).
575 2) Operations whose result is of the same size as the last argument to the
576 operation, but wider than all the other arguments to the operation.
577 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
578 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
580 E.g, when called to expand the following operations, this is how
581 the arguments will be initialized:
582 nops OP0 OP1 WIDE_OP
583 widening-sum 2 oprnd0 - oprnd1
584 widening-dot-product 3 oprnd0 oprnd1 oprnd2
585 widening-mult 2 oprnd0 oprnd1 -
586 type-promotion (vec-unpack) 1 oprnd0 - - */
589 expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
590 rtx target, int unsignedp)
592 struct expand_operand eops[4];
593 tree oprnd0, oprnd1, oprnd2;
594 enum machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
595 optab widen_pattern_optab;
596 enum insn_code icode;
597 int nops = TREE_CODE_LENGTH (ops->code);
598 int op;
600 oprnd0 = ops->op0;
601 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
602 widen_pattern_optab =
603 optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
604 if (ops->code == WIDEN_MULT_PLUS_EXPR
605 || ops->code == WIDEN_MULT_MINUS_EXPR)
606 icode = find_widening_optab_handler (widen_pattern_optab,
607 TYPE_MODE (TREE_TYPE (ops->op2)),
608 tmode0, 0);
609 else
610 icode = optab_handler (widen_pattern_optab, tmode0);
611 gcc_assert (icode != CODE_FOR_nothing);
613 if (nops >= 2)
615 oprnd1 = ops->op1;
616 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
619 /* The last operand is of a wider mode than the rest of the operands. */
620 if (nops == 2)
621 wmode = tmode1;
622 else if (nops == 3)
624 gcc_assert (tmode1 == tmode0);
625 gcc_assert (op1);
626 oprnd2 = ops->op2;
627 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
630 op = 0;
631 create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
632 create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
633 if (op1)
634 create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
635 if (wide_op)
636 create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
637 expand_insn (icode, op, eops);
638 return eops[0].value;
641 /* Generate code to perform an operation specified by TERNARY_OPTAB
642 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
644 UNSIGNEDP is for the case where we have to widen the operands
645 to perform the operation. It says to use zero-extension.
647 If TARGET is nonzero, the value
648 is generated there, if it is convenient to do so.
649 In all cases an rtx is returned for the locus of the value;
650 this may or may not be TARGET. */
653 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
654 rtx op1, rtx op2, rtx target, int unsignedp)
656 struct expand_operand ops[4];
657 enum insn_code icode = optab_handler (ternary_optab, mode);
659 gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);
661 create_output_operand (&ops[0], target, mode);
662 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
663 create_convert_operand_from (&ops[2], op1, mode, unsignedp);
664 create_convert_operand_from (&ops[3], op2, mode, unsignedp);
665 expand_insn (icode, 4, ops);
666 return ops[0].value;
670 /* Like expand_binop, but return a constant rtx if the result can be
671 calculated at compile time. The arguments and return value are
672 otherwise the same as for expand_binop. */
675 simplify_expand_binop (enum machine_mode mode, optab binoptab,
676 rtx op0, rtx op1, rtx target, int unsignedp,
677 enum optab_methods methods)
679 if (CONSTANT_P (op0) && CONSTANT_P (op1))
681 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
683 if (x)
684 return x;
687 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
690 /* Like simplify_expand_binop, but always put the result in TARGET.
691 Return true if the expansion succeeded. */
693 bool
694 force_expand_binop (enum machine_mode mode, optab binoptab,
695 rtx op0, rtx op1, rtx target, int unsignedp,
696 enum optab_methods methods)
698 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
699 target, unsignedp, methods);
700 if (x == 0)
701 return false;
702 if (x != target)
703 emit_move_insn (target, x);
704 return true;
707 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
710 expand_vec_shift_expr (sepops ops, rtx target)
712 struct expand_operand eops[3];
713 enum insn_code icode;
714 rtx rtx_op1, rtx_op2;
715 enum machine_mode mode = TYPE_MODE (ops->type);
716 tree vec_oprnd = ops->op0;
717 tree shift_oprnd = ops->op1;
718 optab shift_optab;
720 switch (ops->code)
722 case VEC_RSHIFT_EXPR:
723 shift_optab = vec_shr_optab;
724 break;
725 case VEC_LSHIFT_EXPR:
726 shift_optab = vec_shl_optab;
727 break;
728 default:
729 gcc_unreachable ();
732 icode = optab_handler (shift_optab, mode);
733 gcc_assert (icode != CODE_FOR_nothing);
735 rtx_op1 = expand_normal (vec_oprnd);
736 rtx_op2 = expand_normal (shift_oprnd);
738 create_output_operand (&eops[0], target, mode);
739 create_input_operand (&eops[1], rtx_op1, GET_MODE (rtx_op1));
740 create_convert_operand_from_type (&eops[2], rtx_op2, TREE_TYPE (shift_oprnd));
741 expand_insn (icode, 3, eops);
743 return eops[0].value;
746 /* Create a new vector value in VMODE with all elements set to OP. The
747 mode of OP must be the element mode of VMODE. If OP is a constant,
748 then the return value will be a constant. */
750 static rtx
751 expand_vector_broadcast (enum machine_mode vmode, rtx op)
753 enum insn_code icode;
754 rtvec vec;
755 rtx ret;
756 int i, n;
758 gcc_checking_assert (VECTOR_MODE_P (vmode));
760 n = GET_MODE_NUNITS (vmode);
761 vec = rtvec_alloc (n);
762 for (i = 0; i < n; ++i)
763 RTVEC_ELT (vec, i) = op;
765 if (CONSTANT_P (op))
766 return gen_rtx_CONST_VECTOR (vmode, vec);
768 /* ??? If the target doesn't have a vec_init, then we have no easy way
769 of performing this operation. Most of this sort of generic support
770 is hidden away in the vector lowering support in gimple. */
771 icode = optab_handler (vec_init_optab, vmode);
772 if (icode == CODE_FOR_nothing)
773 return NULL;
775 ret = gen_reg_rtx (vmode);
776 emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));
778 return ret;
781 /* This subroutine of expand_doubleword_shift handles the cases in which
782 the effective shift value is >= BITS_PER_WORD. The arguments and return
783 value are the same as for the parent routine, except that SUPERWORD_OP1
784 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
785 INTO_TARGET may be null if the caller has decided to calculate it. */
787 static bool
788 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
789 rtx outof_target, rtx into_target,
790 int unsignedp, enum optab_methods methods)
792 if (into_target != 0)
793 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
794 into_target, unsignedp, methods))
795 return false;
797 if (outof_target != 0)
799 /* For a signed right shift, we must fill OUTOF_TARGET with copies
800 of the sign bit, otherwise we must fill it with zeros. */
801 if (binoptab != ashr_optab)
802 emit_move_insn (outof_target, CONST0_RTX (word_mode));
803 else
804 if (!force_expand_binop (word_mode, binoptab,
805 outof_input, GEN_INT (BITS_PER_WORD - 1),
806 outof_target, unsignedp, methods))
807 return false;
809 return true;
812 /* This subroutine of expand_doubleword_shift handles the cases in which
813 the effective shift value is < BITS_PER_WORD. The arguments and return
814 value are the same as for the parent routine. */
816 static bool
817 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
818 rtx outof_input, rtx into_input, rtx op1,
819 rtx outof_target, rtx into_target,
820 int unsignedp, enum optab_methods methods,
821 unsigned HOST_WIDE_INT shift_mask)
823 optab reverse_unsigned_shift, unsigned_shift;
824 rtx tmp, carries;
826 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
827 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
829 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
830 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
831 the opposite direction to BINOPTAB. */
832 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
834 carries = outof_input;
835 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
836 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
837 0, true, methods);
839 else
841 /* We must avoid shifting by BITS_PER_WORD bits since that is either
842 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
843 has unknown behavior. Do a single shift first, then shift by the
844 remainder. It's OK to use ~OP1 as the remainder if shift counts
845 are truncated to the mode size. */
846 carries = expand_binop (word_mode, reverse_unsigned_shift,
847 outof_input, const1_rtx, 0, unsignedp, methods);
848 if (shift_mask == BITS_PER_WORD - 1)
850 tmp = immed_double_const (-1, -1, op1_mode);
851 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
852 0, true, methods);
854 else
856 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
857 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
858 0, true, methods);
861 if (tmp == 0 || carries == 0)
862 return false;
863 carries = expand_binop (word_mode, reverse_unsigned_shift,
864 carries, tmp, 0, unsignedp, methods);
865 if (carries == 0)
866 return false;
868 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
869 so the result can go directly into INTO_TARGET if convenient. */
870 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
871 into_target, unsignedp, methods);
872 if (tmp == 0)
873 return false;
875 /* Now OR in the bits carried over from OUTOF_INPUT. */
876 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
877 into_target, unsignedp, methods))
878 return false;
880 /* Use a standard word_mode shift for the out-of half. */
881 if (outof_target != 0)
882 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
883 outof_target, unsignedp, methods))
884 return false;
886 return true;
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
#endif
952 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
953 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
954 input operand; the shift moves bits in the direction OUTOF_INPUT->
955 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
956 of the target. OP1 is the shift count and OP1_MODE is its mode.
957 If OP1 is constant, it will have been truncated as appropriate
958 and is known to be nonzero.
960 If SHIFT_MASK is zero, the result of word shifts is undefined when the
961 shift count is outside the range [0, BITS_PER_WORD). This routine must
962 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
964 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
965 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
966 fill with zeros or sign bits as appropriate.
968 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
969 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
970 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
971 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
972 are undefined.
974 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
975 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
976 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
977 function wants to calculate it itself.
979 Return true if the shift could be successfully synthesized. */
981 static bool
982 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
983 rtx outof_input, rtx into_input, rtx op1,
984 rtx outof_target, rtx into_target,
985 int unsignedp, enum optab_methods methods,
986 unsigned HOST_WIDE_INT shift_mask)
988 rtx superword_op1, tmp, cmp1, cmp2;
989 rtx subword_label, done_label;
990 enum rtx_code cmp_code;
992 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
993 fill the result with sign or zero bits as appropriate. If so, the value
994 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
995 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
996 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
998 This isn't worthwhile for constant shifts since the optimizers will
999 cope better with in-range shift counts. */
1000 if (shift_mask >= BITS_PER_WORD
1001 && outof_target != 0
1002 && !CONSTANT_P (op1))
1004 if (!expand_doubleword_shift (op1_mode, binoptab,
1005 outof_input, into_input, op1,
1006 0, into_target,
1007 unsignedp, methods, shift_mask))
1008 return false;
1009 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
1010 outof_target, unsignedp, methods))
1011 return false;
1012 return true;
1015 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
1016 is true when the effective shift value is less than BITS_PER_WORD.
1017 Set SUPERWORD_OP1 to the shift count that should be used to shift
1018 OUTOF_INPUT into INTO_TARGET when the condition is false. */
1019 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
1020 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
1022 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
1023 is a subword shift count. */
1024 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
1025 0, true, methods);
1026 cmp2 = CONST0_RTX (op1_mode);
1027 cmp_code = EQ;
1028 superword_op1 = op1;
1030 else
1032 /* Set CMP1 to OP1 - BITS_PER_WORD. */
1033 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
1034 0, true, methods);
1035 cmp2 = CONST0_RTX (op1_mode);
1036 cmp_code = LT;
1037 superword_op1 = cmp1;
1039 if (cmp1 == 0)
1040 return false;
1042 /* If we can compute the condition at compile time, pick the
1043 appropriate subroutine. */
1044 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
1045 if (tmp != 0 && CONST_INT_P (tmp))
1047 if (tmp == const0_rtx)
1048 return expand_superword_shift (binoptab, outof_input, superword_op1,
1049 outof_target, into_target,
1050 unsignedp, methods);
1051 else
1052 return expand_subword_shift (op1_mode, binoptab,
1053 outof_input, into_input, op1,
1054 outof_target, into_target,
1055 unsignedp, methods, shift_mask);
1058 #ifdef HAVE_conditional_move
1059 /* Try using conditional moves to generate straight-line code. */
1061 rtx start = get_last_insn ();
1062 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1063 cmp_code, cmp1, cmp2,
1064 outof_input, into_input,
1065 op1, superword_op1,
1066 outof_target, into_target,
1067 unsignedp, methods, shift_mask))
1068 return true;
1069 delete_insns_since (start);
1071 #endif
1073 /* As a last resort, use branches to select the correct alternative. */
1074 subword_label = gen_label_rtx ();
1075 done_label = gen_label_rtx ();
1077 NO_DEFER_POP;
1078 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1079 0, 0, subword_label, -1);
1080 OK_DEFER_POP;
1082 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1083 outof_target, into_target,
1084 unsignedp, methods))
1085 return false;
1087 emit_jump_insn (gen_jump (done_label));
1088 emit_barrier ();
1089 emit_label (subword_label);
1091 if (!expand_subword_shift (op1_mode, binoptab,
1092 outof_input, into_input, op1,
1093 outof_target, into_target,
1094 unsignedp, methods, shift_mask))
1095 return false;
1097 emit_label (done_label);
1098 return true;
1101 /* Subroutine of expand_binop. Perform a double word multiplication of
1102 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1103 as the target's word_mode. This function returns NULL_RTX if anything
1104 goes wrong, in which case it may have already emitted instructions
1105 which need to be deleted.
1107 If we want to multiply two two-word values and have normal and widening
1108 multiplies of single-word values, we can do this with three smaller
1109 multiplications.
1111 The multiplication proceeds as follows:
1112 _______________________
1113 [__op0_high_|__op0_low__]
1114 _______________________
1115 * [__op1_high_|__op1_low__]
1116 _______________________________________________
1117 _______________________
1118 (1) [__op0_low__*__op1_low__]
1119 _______________________
1120 (2a) [__op0_low__*__op1_high_]
1121 _______________________
1122 (2b) [__op0_high_*__op1_low__]
1123 _______________________
1124 (3) [__op0_high_*__op1_high_]
1127 This gives a 4-word result. Since we are only interested in the
1128 lower 2 words, partial result (3) and the upper words of (2a) and
1129 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1130 calculated using non-widening multiplication.
1132 (1), however, needs to be calculated with an unsigned widening
1133 multiplication. If this operation is not directly supported we
1134 try using a signed widening multiplication and adjust the result.
1135 This adjustment works as follows:
1137 If both operands are positive then no adjustment is needed.
1139 If the operands have different signs, for example op0_low < 0 and
1140 op1_low >= 0, the instruction treats the most significant bit of
1141 op0_low as a sign bit instead of a bit with significance
1142 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1143 with 2**BITS_PER_WORD - op0_low, and two's complements the
1144 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1145 the result.
1147 Similarly, if both operands are negative, we need to add
1148 (op0_low + op1_low) * 2**BITS_PER_WORD.
1150 We use a trick to adjust quickly. We logically shift op0_low right
1151 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1152 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1153 logical shift exists, we do an arithmetic right shift and subtract
1154 the 0 or -1. */
1156 static rtx
1157 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1158 bool umulp, enum optab_methods methods)
1160 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1161 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1162 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1163 rtx product, adjust, product_high, temp;
1165 rtx op0_high = operand_subword_force (op0, high, mode);
1166 rtx op0_low = operand_subword_force (op0, low, mode);
1167 rtx op1_high = operand_subword_force (op1, high, mode);
1168 rtx op1_low = operand_subword_force (op1, low, mode);
1170 /* If we're using an unsigned multiply to directly compute the product
1171 of the low-order words of the operands and perform any required
1172 adjustments of the operands, we begin by trying two more multiplications
1173 and then computing the appropriate sum.
1175 We have checked above that the required addition is provided.
1176 Full-word addition will normally always succeed, especially if
1177 it is provided at all, so we don't worry about its failure. The
1178 multiplication may well fail, however, so we do handle that. */
1180 if (!umulp)
1182 /* ??? This could be done with emit_store_flag where available. */
1183 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1184 NULL_RTX, 1, methods);
1185 if (temp)
1186 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1187 NULL_RTX, 0, OPTAB_DIRECT);
1188 else
1190 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1191 NULL_RTX, 0, methods);
1192 if (!temp)
1193 return NULL_RTX;
1194 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1195 NULL_RTX, 0, OPTAB_DIRECT);
1198 if (!op0_high)
1199 return NULL_RTX;
1202 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1203 NULL_RTX, 0, OPTAB_DIRECT);
1204 if (!adjust)
1205 return NULL_RTX;
1207 /* OP0_HIGH should now be dead. */
1209 if (!umulp)
1211 /* ??? This could be done with emit_store_flag where available. */
1212 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1213 NULL_RTX, 1, methods);
1214 if (temp)
1215 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1216 NULL_RTX, 0, OPTAB_DIRECT);
1217 else
1219 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1220 NULL_RTX, 0, methods);
1221 if (!temp)
1222 return NULL_RTX;
1223 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1224 NULL_RTX, 0, OPTAB_DIRECT);
1227 if (!op1_high)
1228 return NULL_RTX;
1231 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1232 NULL_RTX, 0, OPTAB_DIRECT);
1233 if (!temp)
1234 return NULL_RTX;
1236 /* OP1_HIGH should now be dead. */
1238 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1239 NULL_RTX, 0, OPTAB_DIRECT);
1241 if (target && !REG_P (target))
1242 target = NULL_RTX;
1244 if (umulp)
1245 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1246 target, 1, OPTAB_DIRECT);
1247 else
1248 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1249 target, 1, OPTAB_DIRECT);
1251 if (!product)
1252 return NULL_RTX;
1254 product_high = operand_subword (product, high, 1, mode);
1255 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1256 NULL_RTX, 0, OPTAB_DIRECT);
1257 emit_move_insn (product_high, adjust);
1258 return product;
1261 /* Wrapper around expand_binop which takes an rtx code to specify
1262 the operation to perform, not an optab pointer. All other
1263 arguments are the same. */
1265 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1266 rtx op1, rtx target, int unsignedp,
1267 enum optab_methods methods)
1269 optab binop = code_to_optab[(int) code];
1270 gcc_assert (binop);
1272 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1275 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1276 binop. Order them according to commutative_operand_precedence and, if
1277 possible, try to put TARGET or a pseudo first. */
1278 static bool
1279 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1281 int op0_prec = commutative_operand_precedence (op0);
1282 int op1_prec = commutative_operand_precedence (op1);
1284 if (op0_prec < op1_prec)
1285 return true;
1287 if (op0_prec > op1_prec)
1288 return false;
1290 /* With equal precedence, both orders are ok, but it is better if the
1291 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1292 if (target == 0 || REG_P (target))
1293 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1294 else
1295 return rtx_equal_p (op1, target);
1298 /* Return true if BINOPTAB implements a shift operation. */
1300 static bool
1301 shift_optab_p (optab binoptab)
1303 switch (binoptab->code)
1305 case ASHIFT:
1306 case SS_ASHIFT:
1307 case US_ASHIFT:
1308 case ASHIFTRT:
1309 case LSHIFTRT:
1310 case ROTATE:
1311 case ROTATERT:
1312 return true;
1314 default:
1315 return false;
1319 /* Return true if BINOPTAB implements a commutative binary operation. */
1321 static bool
1322 commutative_optab_p (optab binoptab)
1324 return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1325 || binoptab == smul_widen_optab
1326 || binoptab == umul_widen_optab
1327 || binoptab == smul_highpart_optab
1328 || binoptab == umul_highpart_optab);
1331 /* X is to be used in mode MODE as operand OPN to BINOPTAB. If we're
1332 optimizing, and if the operand is a constant that costs more than
1333 1 instruction, force the constant into a register and return that
1334 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
1336 static rtx
1337 avoid_expensive_constant (enum machine_mode mode, optab binoptab,
1338 int opn, rtx x, bool unsignedp)
1340 bool speed = optimize_insn_for_speed_p ();
1342 if (mode != VOIDmode
1343 && optimize
1344 && CONSTANT_P (x)
1345 && rtx_cost (x, binoptab->code, opn, speed) > set_src_cost (x, speed))
1347 if (CONST_INT_P (x))
1349 HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
1350 if (intval != INTVAL (x))
1351 x = GEN_INT (intval);
1353 else
1354 x = convert_modes (mode, VOIDmode, x, unsignedp);
1355 x = force_reg (mode, x);
1357 return x;
1360 /* Helper function for expand_binop: handle the case where there
1361 is an insn that directly implements the indicated operation.
1362 Returns null if this is not possible. */
1363 static rtx
1364 expand_binop_directly (enum machine_mode mode, optab binoptab,
1365 rtx op0, rtx op1,
1366 rtx target, int unsignedp, enum optab_methods methods,
1367 rtx last)
1369 enum machine_mode from_mode = widened_mode (mode, op0, op1);
1370 enum insn_code icode = find_widening_optab_handler (binoptab, mode,
1371 from_mode, 1);
1372 enum machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
1373 enum machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
1374 enum machine_mode mode0, mode1, tmp_mode;
1375 struct expand_operand ops[3];
1376 bool commutative_p;
1377 rtx pat;
1378 rtx xop0 = op0, xop1 = op1;
1379 rtx swap;
1381 /* If it is a commutative operator and the modes would match
1382 if we would swap the operands, we can save the conversions. */
1383 commutative_p = commutative_optab_p (binoptab);
1384 if (commutative_p
1385 && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
1386 && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode1)
1388 swap = xop0;
1389 xop0 = xop1;
1390 xop1 = swap;
1393 /* If we are optimizing, force expensive constants into a register. */
1394 xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
1395 if (!shift_optab_p (binoptab))
1396 xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
1398 /* In case the insn wants input operands in modes different from
1399 those of the actual operands, convert the operands. It would
1400 seem that we don't need to convert CONST_INTs, but we do, so
1401 that they're properly zero-extended, sign-extended or truncated
1402 for their mode. */
1404 mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
1405 if (xmode0 != VOIDmode && xmode0 != mode0)
1407 xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
1408 mode0 = xmode0;
1411 mode1 = GET_MODE (xop1) != VOIDmode ? GET_MODE (xop1) : mode;
1412 if (xmode1 != VOIDmode && xmode1 != mode1)
1414 xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
1415 mode1 = xmode1;
1418 /* If operation is commutative,
1419 try to make the first operand a register.
1420 Even better, try to make it the same as the target.
1421 Also try to make the last operand a constant. */
1422 if (commutative_p
1423 && swap_commutative_operands_with_target (target, xop0, xop1))
1425 swap = xop1;
1426 xop1 = xop0;
1427 xop0 = swap;
1430 /* Now, if insn's predicates don't allow our operands, put them into
1431 pseudo regs. */
1433 if (binoptab == vec_pack_trunc_optab
1434 || binoptab == vec_pack_usat_optab
1435 || binoptab == vec_pack_ssat_optab
1436 || binoptab == vec_pack_ufix_trunc_optab
1437 || binoptab == vec_pack_sfix_trunc_optab)
1439 /* The mode of the result is different then the mode of the
1440 arguments. */
1441 tmp_mode = insn_data[(int) icode].operand[0].mode;
1442 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1444 delete_insns_since (last);
1445 return NULL_RTX;
1448 else
1449 tmp_mode = mode;
1451 create_output_operand (&ops[0], target, tmp_mode);
1452 create_input_operand (&ops[1], xop0, mode0);
1453 create_input_operand (&ops[2], xop1, mode1);
1454 pat = maybe_gen_insn (icode, 3, ops);
1455 if (pat)
1457 /* If PAT is composed of more than one insn, try to add an appropriate
1458 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1459 operand, call expand_binop again, this time without a target. */
1460 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1461 && ! add_equal_note (pat, ops[0].value, binoptab->code,
1462 ops[1].value, ops[2].value))
1464 delete_insns_since (last);
1465 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1466 unsignedp, methods);
1469 emit_insn (pat);
1470 return ops[0].value;
1472 delete_insns_since (last);
1473 return NULL_RTX;
1476 /* Generate code to perform an operation specified by BINOPTAB
1477 on operands OP0 and OP1, with result having machine-mode MODE.
1479 UNSIGNEDP is for the case where we have to widen the operands
1480 to perform the operation. It says to use zero-extension.
1482 If TARGET is nonzero, the value
1483 is generated there, if it is convenient to do so.
1484 In all cases an rtx is returned for the locus of the value;
1485 this may or may not be TARGET. */
1488 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1489 rtx target, int unsignedp, enum optab_methods methods)
1491 enum optab_methods next_methods
1492 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1493 ? OPTAB_WIDEN : methods);
1494 enum mode_class mclass;
1495 enum machine_mode wider_mode;
1496 rtx libfunc;
1497 rtx temp;
1498 rtx entry_last = get_last_insn ();
1499 rtx last;
1501 mclass = GET_MODE_CLASS (mode);
1503 /* If subtracting an integer constant, convert this into an addition of
1504 the negated constant. */
1506 if (binoptab == sub_optab && CONST_INT_P (op1))
1508 op1 = negate_rtx (mode, op1);
1509 binoptab = add_optab;
1512 /* Record where to delete back to if we backtrack. */
1513 last = get_last_insn ();
1515 /* If we can do it with a three-operand insn, do so. */
1517 if (methods != OPTAB_MUST_WIDEN
1518 && find_widening_optab_handler (binoptab, mode,
1519 widened_mode (mode, op0, op1), 1)
1520 != CODE_FOR_nothing)
1522 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1523 unsignedp, methods, last);
1524 if (temp)
1525 return temp;
1528 /* If we were trying to rotate, and that didn't work, try rotating
1529 the other direction before falling back to shifts and bitwise-or. */
1530 if (((binoptab == rotl_optab
1531 && optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
1532 || (binoptab == rotr_optab
1533 && optab_handler (rotl_optab, mode) != CODE_FOR_nothing))
1534 && mclass == MODE_INT)
1536 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1537 rtx newop1;
1538 unsigned int bits = GET_MODE_PRECISION (mode);
1540 if (CONST_INT_P (op1))
1541 newop1 = GEN_INT (bits - INTVAL (op1));
1542 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1543 newop1 = negate_rtx (GET_MODE (op1), op1);
1544 else
1545 newop1 = expand_binop (GET_MODE (op1), sub_optab,
1546 GEN_INT (bits), op1,
1547 NULL_RTX, unsignedp, OPTAB_DIRECT);
1549 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1550 target, unsignedp, methods, last);
1551 if (temp)
1552 return temp;
1555 /* If this is a multiply, see if we can do a widening operation that
1556 takes operands of this mode and makes a wider mode. */
1558 if (binoptab == smul_optab
1559 && GET_MODE_2XWIDER_MODE (mode) != VOIDmode
1560 && (widening_optab_handler ((unsignedp ? umul_widen_optab
1561 : smul_widen_optab),
1562 GET_MODE_2XWIDER_MODE (mode), mode)
1563 != CODE_FOR_nothing))
1565 temp = expand_binop (GET_MODE_2XWIDER_MODE (mode),
1566 unsignedp ? umul_widen_optab : smul_widen_optab,
1567 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1569 if (temp != 0)
1571 if (GET_MODE_CLASS (mode) == MODE_INT
1572 && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
1573 return gen_lowpart (mode, temp);
1574 else
1575 return convert_to_mode (mode, temp, unsignedp);
1579 /* If this is a vector shift by a scalar, see if we can do a vector
1580 shift by a vector. If so, broadcast the scalar into a vector. */
1581 if (mclass == MODE_VECTOR_INT)
1583 optab otheroptab = NULL;
1585 if (binoptab == ashl_optab)
1586 otheroptab = vashl_optab;
1587 else if (binoptab == ashr_optab)
1588 otheroptab = vashr_optab;
1589 else if (binoptab == lshr_optab)
1590 otheroptab = vlshr_optab;
1591 else if (binoptab == rotl_optab)
1592 otheroptab = vrotl_optab;
1593 else if (binoptab == rotr_optab)
1594 otheroptab = vrotr_optab;
1596 if (otheroptab && optab_handler (otheroptab, mode) != CODE_FOR_nothing)
1598 rtx vop1 = expand_vector_broadcast (mode, op1);
1599 if (vop1)
1601 temp = expand_binop_directly (mode, otheroptab, op0, vop1,
1602 target, unsignedp, methods, last);
1603 if (temp)
1604 return temp;
1609 /* Look for a wider mode of the same class for which we think we
1610 can open-code the operation. Check for a widening multiply at the
1611 wider mode as well. */
1613 if (CLASS_HAS_WIDER_MODES_P (mclass)
1614 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1615 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1616 wider_mode != VOIDmode;
1617 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1619 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
1620 || (binoptab == smul_optab
1621 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1622 && (find_widening_optab_handler ((unsignedp
1623 ? umul_widen_optab
1624 : smul_widen_optab),
1625 GET_MODE_WIDER_MODE (wider_mode),
1626 mode, 0)
1627 != CODE_FOR_nothing)))
1629 rtx xop0 = op0, xop1 = op1;
1630 int no_extend = 0;
1632 /* For certain integer operations, we need not actually extend
1633 the narrow operands, as long as we will truncate
1634 the results to the same narrowness. */
1636 if ((binoptab == ior_optab || binoptab == and_optab
1637 || binoptab == xor_optab
1638 || binoptab == add_optab || binoptab == sub_optab
1639 || binoptab == smul_optab || binoptab == ashl_optab)
1640 && mclass == MODE_INT)
1642 no_extend = 1;
1643 xop0 = avoid_expensive_constant (mode, binoptab, 0,
1644 xop0, unsignedp);
1645 if (binoptab != ashl_optab)
1646 xop1 = avoid_expensive_constant (mode, binoptab, 1,
1647 xop1, unsignedp);
1650 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1652 /* The second operand of a shift must always be extended. */
1653 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1654 no_extend && binoptab != ashl_optab);
1656 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1657 unsignedp, OPTAB_DIRECT);
1658 if (temp)
1660 if (mclass != MODE_INT
1661 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1663 if (target == 0)
1664 target = gen_reg_rtx (mode);
1665 convert_move (target, temp, 0);
1666 return target;
1668 else
1669 return gen_lowpart (mode, temp);
1671 else
1672 delete_insns_since (last);
1676 /* If operation is commutative,
1677 try to make the first operand a register.
1678 Even better, try to make it the same as the target.
1679 Also try to make the last operand a constant. */
1680 if (commutative_optab_p (binoptab)
1681 && swap_commutative_operands_with_target (target, op0, op1))
1683 temp = op1;
1684 op1 = op0;
1685 op0 = temp;
1688 /* These can be done a word at a time. */
1689 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1690 && mclass == MODE_INT
1691 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1692 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1694 int i;
1695 rtx insns;
1697 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1698 won't be accurate, so use a new target. */
1699 if (target == 0
1700 || target == op0
1701 || target == op1
1702 || !valid_multiword_target_p (target))
1703 target = gen_reg_rtx (mode);
1705 start_sequence ();
1707 /* Do the actual arithmetic. */
1708 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1710 rtx target_piece = operand_subword (target, i, 1, mode);
1711 rtx x = expand_binop (word_mode, binoptab,
1712 operand_subword_force (op0, i, mode),
1713 operand_subword_force (op1, i, mode),
1714 target_piece, unsignedp, next_methods);
1716 if (x == 0)
1717 break;
1719 if (target_piece != x)
1720 emit_move_insn (target_piece, x);
1723 insns = get_insns ();
1724 end_sequence ();
1726 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1728 emit_insn (insns);
1729 return target;
1733 /* Synthesize double word shifts from single word shifts. */
1734 if ((binoptab == lshr_optab || binoptab == ashl_optab
1735 || binoptab == ashr_optab)
1736 && mclass == MODE_INT
1737 && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
1738 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1739 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode)
1740 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
1741 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1742 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1744 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1745 enum machine_mode op1_mode;
1747 double_shift_mask = targetm.shift_truncation_mask (mode);
1748 shift_mask = targetm.shift_truncation_mask (word_mode);
1749 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1751 /* Apply the truncation to constant shifts. */
1752 if (double_shift_mask > 0 && CONST_INT_P (op1))
1753 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1755 if (op1 == CONST0_RTX (op1_mode))
1756 return op0;
1758 /* Make sure that this is a combination that expand_doubleword_shift
1759 can handle. See the comments there for details. */
1760 if (double_shift_mask == 0
1761 || (shift_mask == BITS_PER_WORD - 1
1762 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1764 rtx insns;
1765 rtx into_target, outof_target;
1766 rtx into_input, outof_input;
1767 int left_shift, outof_word;
1769 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1770 won't be accurate, so use a new target. */
1771 if (target == 0
1772 || target == op0
1773 || target == op1
1774 || !valid_multiword_target_p (target))
1775 target = gen_reg_rtx (mode);
1777 start_sequence ();
1779 /* OUTOF_* is the word we are shifting bits away from, and
1780 INTO_* is the word that we are shifting bits towards, thus
1781 they differ depending on the direction of the shift and
1782 WORDS_BIG_ENDIAN. */
1784 left_shift = binoptab == ashl_optab;
1785 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1787 outof_target = operand_subword (target, outof_word, 1, mode);
1788 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1790 outof_input = operand_subword_force (op0, outof_word, mode);
1791 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1793 if (expand_doubleword_shift (op1_mode, binoptab,
1794 outof_input, into_input, op1,
1795 outof_target, into_target,
1796 unsignedp, next_methods, shift_mask))
1798 insns = get_insns ();
1799 end_sequence ();
1801 emit_insn (insns);
1802 return target;
1804 end_sequence ();
1808 /* Synthesize double word rotates from single word shifts. */
1809 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1810 && mclass == MODE_INT
1811 && CONST_INT_P (op1)
1812 && GET_MODE_PRECISION (mode) == 2 * BITS_PER_WORD
1813 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1814 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1816 rtx insns;
1817 rtx into_target, outof_target;
1818 rtx into_input, outof_input;
1819 rtx inter;
1820 int shift_count, left_shift, outof_word;
1822 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1823 won't be accurate, so use a new target. Do this also if target is not
1824 a REG, first because having a register instead may open optimization
1825 opportunities, and second because if target and op0 happen to be MEMs
1826 designating the same location, we would risk clobbering it too early
1827 in the code sequence we generate below. */
1828 if (target == 0
1829 || target == op0
1830 || target == op1
1831 || !REG_P (target)
1832 || !valid_multiword_target_p (target))
1833 target = gen_reg_rtx (mode);
1835 start_sequence ();
1837 shift_count = INTVAL (op1);
1839 /* OUTOF_* is the word we are shifting bits away from, and
1840 INTO_* is the word that we are shifting bits towards, thus
1841 they differ depending on the direction of the shift and
1842 WORDS_BIG_ENDIAN. */
1844 left_shift = (binoptab == rotl_optab);
1845 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1847 outof_target = operand_subword (target, outof_word, 1, mode);
1848 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1850 outof_input = operand_subword_force (op0, outof_word, mode);
1851 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1853 if (shift_count == BITS_PER_WORD)
1855 /* This is just a word swap. */
1856 emit_move_insn (outof_target, into_input);
1857 emit_move_insn (into_target, outof_input);
1858 inter = const0_rtx;
1860 else
1862 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1863 rtx first_shift_count, second_shift_count;
1864 optab reverse_unsigned_shift, unsigned_shift;
1866 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1867 ? lshr_optab : ashl_optab);
1869 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1870 ? ashl_optab : lshr_optab);
1872 if (shift_count > BITS_PER_WORD)
1874 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1875 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1877 else
1879 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1880 second_shift_count = GEN_INT (shift_count);
1883 into_temp1 = expand_binop (word_mode, unsigned_shift,
1884 outof_input, first_shift_count,
1885 NULL_RTX, unsignedp, next_methods);
1886 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1887 into_input, second_shift_count,
1888 NULL_RTX, unsignedp, next_methods);
1890 if (into_temp1 != 0 && into_temp2 != 0)
1891 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1892 into_target, unsignedp, next_methods);
1893 else
1894 inter = 0;
1896 if (inter != 0 && inter != into_target)
1897 emit_move_insn (into_target, inter);
1899 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1900 into_input, first_shift_count,
1901 NULL_RTX, unsignedp, next_methods);
1902 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1903 outof_input, second_shift_count,
1904 NULL_RTX, unsignedp, next_methods);
1906 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1907 inter = expand_binop (word_mode, ior_optab,
1908 outof_temp1, outof_temp2,
1909 outof_target, unsignedp, next_methods);
1911 if (inter != 0 && inter != outof_target)
1912 emit_move_insn (outof_target, inter);
1915 insns = get_insns ();
1916 end_sequence ();
1918 if (inter != 0)
1920 emit_insn (insns);
1921 return target;
1925 /* These can be done a word at a time by propagating carries. */
1926 if ((binoptab == add_optab || binoptab == sub_optab)
1927 && mclass == MODE_INT
1928 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1929 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1931 unsigned int i;
1932 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1933 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1934 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1935 rtx xop0, xop1, xtarget;
1937 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1938 value is one of those, use it. Otherwise, use 1 since it is the
1939 one easiest to get. */
1940 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1941 int normalizep = STORE_FLAG_VALUE;
1942 #else
1943 int normalizep = 1;
1944 #endif
1946 /* Prepare the operands. */
1947 xop0 = force_reg (mode, op0);
1948 xop1 = force_reg (mode, op1);
1950 xtarget = gen_reg_rtx (mode);
1952 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1953 target = xtarget;
1955 /* Indicate for flow that the entire target reg is being set. */
1956 if (REG_P (target))
1957 emit_clobber (xtarget);
1959 /* Do the actual arithmetic. */
1960 for (i = 0; i < nwords; i++)
1962 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1963 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1964 rtx op0_piece = operand_subword_force (xop0, index, mode);
1965 rtx op1_piece = operand_subword_force (xop1, index, mode);
1966 rtx x;
1968 /* Main add/subtract of the input operands. */
1969 x = expand_binop (word_mode, binoptab,
1970 op0_piece, op1_piece,
1971 target_piece, unsignedp, next_methods);
1972 if (x == 0)
1973 break;
1975 if (i + 1 < nwords)
1977 /* Store carry from main add/subtract. */
1978 carry_out = gen_reg_rtx (word_mode);
1979 carry_out = emit_store_flag_force (carry_out,
1980 (binoptab == add_optab
1981 ? LT : GT),
1982 x, op0_piece,
1983 word_mode, 1, normalizep);
1986 if (i > 0)
1988 rtx newx;
1990 /* Add/subtract previous carry to main result. */
1991 newx = expand_binop (word_mode,
1992 normalizep == 1 ? binoptab : otheroptab,
1993 x, carry_in,
1994 NULL_RTX, 1, next_methods);
1996 if (i + 1 < nwords)
1998 /* Get out carry from adding/subtracting carry in. */
1999 rtx carry_tmp = gen_reg_rtx (word_mode);
2000 carry_tmp = emit_store_flag_force (carry_tmp,
2001 (binoptab == add_optab
2002 ? LT : GT),
2003 newx, x,
2004 word_mode, 1, normalizep);
2006 /* Logical-ior the two poss. carry together. */
2007 carry_out = expand_binop (word_mode, ior_optab,
2008 carry_out, carry_tmp,
2009 carry_out, 0, next_methods);
2010 if (carry_out == 0)
2011 break;
2013 emit_move_insn (target_piece, newx);
2015 else
2017 if (x != target_piece)
2018 emit_move_insn (target_piece, x);
2021 carry_in = carry_out;
2024 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
2026 if (optab_handler (mov_optab, mode) != CODE_FOR_nothing
2027 || ! rtx_equal_p (target, xtarget))
2029 rtx temp = emit_move_insn (target, xtarget);
2031 set_dst_reg_note (temp, REG_EQUAL,
2032 gen_rtx_fmt_ee (binoptab->code, mode,
2033 copy_rtx (xop0),
2034 copy_rtx (xop1)),
2035 target);
2037 else
2038 target = xtarget;
2040 return target;
2043 else
2044 delete_insns_since (last);
2047 /* Attempt to synthesize double word multiplies using a sequence of word
2048 mode multiplications. We first attempt to generate a sequence using a
2049 more efficient unsigned widening multiply, and if that fails we then
2050 try using a signed widening multiply. */
2052 if (binoptab == smul_optab
2053 && mclass == MODE_INT
2054 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2055 && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
2056 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
2058 rtx product = NULL_RTX;
2059 if (widening_optab_handler (umul_widen_optab, mode, word_mode)
2060 != CODE_FOR_nothing)
2062 product = expand_doubleword_mult (mode, op0, op1, target,
2063 true, methods);
2064 if (!product)
2065 delete_insns_since (last);
2068 if (product == NULL_RTX
2069 && widening_optab_handler (smul_widen_optab, mode, word_mode)
2070 != CODE_FOR_nothing)
2072 product = expand_doubleword_mult (mode, op0, op1, target,
2073 false, methods);
2074 if (!product)
2075 delete_insns_since (last);
2078 if (product != NULL_RTX)
2080 if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
2082 temp = emit_move_insn (target ? target : product, product);
2083 set_dst_reg_note (temp,
2084 REG_EQUAL,
2085 gen_rtx_fmt_ee (MULT, mode,
2086 copy_rtx (op0),
2087 copy_rtx (op1)),
2088 target ? target : product);
2090 return product;
2094 /* It can't be open-coded in this mode.
2095 Use a library call if one is available and caller says that's ok. */
2097 libfunc = optab_libfunc (binoptab, mode);
2098 if (libfunc
2099 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2101 rtx insns;
2102 rtx op1x = op1;
2103 enum machine_mode op1_mode = mode;
2104 rtx value;
2106 start_sequence ();
2108 if (shift_optab_p (binoptab))
2110 op1_mode = targetm.libgcc_shift_count_mode ();
2111 /* Specify unsigned here,
2112 since negative shift counts are meaningless. */
2113 op1x = convert_to_mode (op1_mode, op1, 1);
2116 if (GET_MODE (op0) != VOIDmode
2117 && GET_MODE (op0) != mode)
2118 op0 = convert_to_mode (mode, op0, unsignedp);
2120 /* Pass 1 for NO_QUEUE so we don't lose any increments
2121 if the libcall is cse'd or moved. */
2122 value = emit_library_call_value (libfunc,
2123 NULL_RTX, LCT_CONST, mode, 2,
2124 op0, mode, op1x, op1_mode);
2126 insns = get_insns ();
2127 end_sequence ();
2129 target = gen_reg_rtx (mode);
2130 emit_libcall_block_1 (insns, target, value,
2131 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1),
2132 trapv_binoptab_p (binoptab));
2134 return target;
2137 delete_insns_since (last);
2139 /* It can't be done in this mode. Can we do it in a wider mode? */
2141 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2142 || methods == OPTAB_MUST_WIDEN))
2144 /* Caller says, don't even try. */
2145 delete_insns_since (entry_last);
2146 return 0;
2149 /* Compute the value of METHODS to pass to recursive calls.
2150 Don't allow widening to be tried recursively. */
2152 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2154 /* Look for a wider mode of the same class for which it appears we can do
2155 the operation. */
2157 if (CLASS_HAS_WIDER_MODES_P (mclass))
2159 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2160 wider_mode != VOIDmode;
2161 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2163 if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
2164 != CODE_FOR_nothing
2165 || (methods == OPTAB_LIB
2166 && optab_libfunc (binoptab, wider_mode)))
2168 rtx xop0 = op0, xop1 = op1;
2169 int no_extend = 0;
2171 /* For certain integer operations, we need not actually extend
2172 the narrow operands, as long as we will truncate
2173 the results to the same narrowness. */
2175 if ((binoptab == ior_optab || binoptab == and_optab
2176 || binoptab == xor_optab
2177 || binoptab == add_optab || binoptab == sub_optab
2178 || binoptab == smul_optab || binoptab == ashl_optab)
2179 && mclass == MODE_INT)
2180 no_extend = 1;
2182 xop0 = widen_operand (xop0, wider_mode, mode,
2183 unsignedp, no_extend);
2185 /* The second operand of a shift must always be extended. */
2186 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2187 no_extend && binoptab != ashl_optab);
2189 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2190 unsignedp, methods);
2191 if (temp)
2193 if (mclass != MODE_INT
2194 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2196 if (target == 0)
2197 target = gen_reg_rtx (mode);
2198 convert_move (target, temp, 0);
2199 return target;
2201 else
2202 return gen_lowpart (mode, temp);
2204 else
2205 delete_insns_since (last);
2210 delete_insns_since (entry_last);
2211 return 0;
/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.

   The fallback order is: direct insn, widened insn, same-width libcall,
   widened libcall — stopping as soon as METHODS says the caller does not
   want the next step tried.  */

rtx
sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
		   rtx op0, rtx op1, rtx target, int unsignedp,
		   enum optab_methods methods)
{
  rtx temp;
  optab direct_optab = unsignedp ? uoptab : soptab;
  struct optab_d wide_soptab;

  /* Do it without widening, if possible.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target,
		       unsignedp, OPTAB_DIRECT);
  if (temp || methods == OPTAB_DIRECT)
    return temp;

  /* Try widening to a signed int.  Make a fake signed optab that
     hides any signed insn for direct use: clearing the handler for
     MODE forces expand_binop to actually widen rather than use the
     same-width signed pattern (which would be wrong for unsigned
     operands).  */
  wide_soptab = *soptab;
  set_optab_handler (&wide_soptab, mode, CODE_FOR_nothing);
  /* We don't want to generate new hash table entries from this fake
     optab.  */
  wide_soptab.libcall_gen = NULL;

  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
		       unsignedp, OPTAB_WIDEN);

  /* For unsigned operands, try widening to an unsigned int.  */
  if (temp == 0 && unsignedp)
    temp = expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, OPTAB_WIDEN);
  if (temp || methods == OPTAB_WIDEN)
    return temp;

  /* Use the right width libcall if that exists.  */
  temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
  if (temp || methods == OPTAB_LIB)
    return temp;

  /* Must widen and use a libcall, use either signed or unsigned.  */
  temp = expand_binop (mode, &wide_soptab, op0, op1, target,
		       unsignedp, methods);
  if (temp != 0)
    return temp;
  if (unsignedp)
    return expand_binop (mode, uoptab, op0, op1, target,
			 unsignedp, methods);
  return 0;
}
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, TARG1, OP0.

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
		    int unsignedp)
{
  /* The operation mode comes from whichever result the caller asked
     for; the gen_reg_rtx calls below rely on at least one being set.  */
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class mclass;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  mclass = GET_MODE_CLASS (mode);

  /* Materialize dummy targets for any result the caller discards.  */
  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      enum insn_code icode = optab_handler (unoptab, mode);

      create_fixed_operand (&ops[0], targ0);
      create_fixed_operand (&ops[1], targ1);
      create_convert_operand_from (&ops[2], op0, mode, unsignedp);
      if (maybe_expand_insn (icode, 3, ops))
	return 1;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	    {
	      /* Recurse in the wider mode, then narrow both results
		 back to the requested mode.  */
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

	      if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  /* Complete failure: discard everything emitted since entry.  */
  delete_insns_since (entry_last);
  return 0;
}
2343 /* Generate code to perform an operation specified by BINOPTAB
2344 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2345 We assume that the order of the operands for the instruction
2346 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2347 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2349 Either TARG0 or TARG1 may be zero, but what that means is that
2350 the result is not actually wanted. We will generate it into
2351 a dummy pseudo-reg and discard it. They may not both be zero.
2353 Returns 1 if this operation can be performed; 0 if not. */
2356 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2357 int unsignedp)
2359 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2360 enum mode_class mclass;
2361 enum machine_mode wider_mode;
2362 rtx entry_last = get_last_insn ();
2363 rtx last;
2365 mclass = GET_MODE_CLASS (mode);
2367 if (!targ0)
2368 targ0 = gen_reg_rtx (mode);
2369 if (!targ1)
2370 targ1 = gen_reg_rtx (mode);
2372 /* Record where to go back to if we fail. */
2373 last = get_last_insn ();
2375 if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2377 struct expand_operand ops[4];
2378 enum insn_code icode = optab_handler (binoptab, mode);
2379 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2380 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2381 rtx xop0 = op0, xop1 = op1;
2383 /* If we are optimizing, force expensive constants into a register. */
2384 xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2385 xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2387 create_fixed_operand (&ops[0], targ0);
2388 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2389 create_convert_operand_from (&ops[2], op1, mode, unsignedp);
2390 create_fixed_operand (&ops[3], targ1);
2391 if (maybe_expand_insn (icode, 4, ops))
2392 return 1;
2393 delete_insns_since (last);
2396 /* It can't be done in this mode. Can we do it in a wider mode? */
2398 if (CLASS_HAS_WIDER_MODES_P (mclass))
2400 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2401 wider_mode != VOIDmode;
2402 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2404 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2406 rtx t0 = gen_reg_rtx (wider_mode);
2407 rtx t1 = gen_reg_rtx (wider_mode);
2408 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2409 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2411 if (expand_twoval_binop (binoptab, cop0, cop1,
2412 t0, t1, unsignedp))
2414 convert_move (targ0, t0, unsignedp);
2415 convert_move (targ1, t1, unsignedp);
2416 return 1;
2418 else
2419 delete_insns_since (last);
2424 delete_insns_since (entry_last);
2425 return 0;
/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns true if the call was successful.  */

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
			     rtx targ0, rtx targ1, enum rtx_code code)
{
  enum machine_mode mode;
  enum machine_mode libval_mode;
  rtx libval;
  rtx insns;
  rtx libfunc;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  libfunc = optab_libfunc (binoptab, mode);
  if (!libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
					MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				    libval_mode, 2,
				    op0, mode,
				    op1, mode);
  /* Get the part of VAL containing the value that we want: subreg
     offset 0 selects the first value (for TARG0), offset
     GET_MODE_SIZE (mode) the second (for TARG1).  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
				targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location, tagging the libcall
     block with a (CODE OP0 OP1) equivalence.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
		      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
2477 /* Wrapper around expand_unop which takes an rtx code to specify
2478 the operation to perform, not an optab pointer. All other
2479 arguments are the same. */
2481 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2482 rtx target, int unsignedp)
2484 optab unop = code_to_optab[(int) code];
2485 gcc_assert (unop);
2487 return expand_unop (mode, unop, op0, target, unsignedp);
2490 /* Try calculating
2491 (clz:narrow x)
2493 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2495 A similar operation can be used for clrsb. UNOPTAB says which operation
2496 we are trying to expand. */
2497 static rtx
2498 widen_leading (enum machine_mode mode, rtx op0, rtx target, optab unoptab)
2500 enum mode_class mclass = GET_MODE_CLASS (mode);
2501 if (CLASS_HAS_WIDER_MODES_P (mclass))
2503 enum machine_mode wider_mode;
2504 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2505 wider_mode != VOIDmode;
2506 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2508 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2510 rtx xop0, temp, last;
2512 last = get_last_insn ();
2514 if (target == 0)
2515 target = gen_reg_rtx (mode);
2516 xop0 = widen_operand (op0, wider_mode, mode,
2517 unoptab != clrsb_optab, false);
2518 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2519 unoptab != clrsb_optab);
2520 if (temp != 0)
2521 temp = expand_binop (wider_mode, sub_optab, temp,
2522 GEN_INT (GET_MODE_PRECISION (wider_mode)
2523 - GET_MODE_PRECISION (mode)),
2524 target, true, OPTAB_DIRECT);
2525 if (temp == 0)
2526 delete_insns_since (last);
2528 return temp;
2532 return 0;
/* Try calculating clz of a double-word quantity as two clz's of word-sized
   quantities, choosing which based on whether the high word is nonzero.  */
static rtx
expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
{
  rtx xop0 = force_reg (mode, op0);
  rtx subhi = gen_highpart (word_mode, xop0);
  rtx sublo = gen_lowpart (word_mode, xop0);
  rtx hi0_label = gen_label_rtx ();
  rtx after_label = gen_label_rtx ();
  rtx seq, temp, result;

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_clz* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  /* In any case, write to a word_mode scratch in both branches of the
     conditional, so we can ensure there is a single move insn setting
     'target' to tag a REG_EQUAL note on.  */
  result = gen_reg_rtx (word_mode);

  start_sequence ();

  /* If the high word is not equal to zero,
     then clz of the full value is clz of the high word.  */
  emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
			   word_mode, true, hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
  if (!temp)
    goto fail;

  if (temp != result)
    convert_move (result, temp, true);

  emit_jump_insn (gen_jump (after_label));
  emit_barrier ();

  /* Else clz of the full value is clz of the low word plus the number
     of bits in the high word.  */
  emit_label (hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
  if (!temp)
    goto fail;
  temp = expand_binop (word_mode, add_optab, temp,
		       GEN_INT (GET_MODE_BITSIZE (word_mode)),
		       result, true, OPTAB_DIRECT);
  if (!temp)
    goto fail;
  if (temp != result)
    convert_move (result, temp, true);

  emit_label (after_label);
  convert_move (target, result, true);

  seq = get_insns ();
  end_sequence ();

  /* Emit the whole sequence with a single REG_EQUAL (clz xop0) note
     on the final move into TARGET.  */
  add_equal_note (seq, target, CLZ, xop0, 0);
  emit_insn (seq);
  return target;

 fail:
  /* Abandon the partially-built sequence; nothing has been emitted
     into the main insn stream yet.  */
  end_sequence ();
  return 0;
}
2605 /* Try calculating
2606 (bswap:narrow x)
2608 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2609 static rtx
2610 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2612 enum mode_class mclass = GET_MODE_CLASS (mode);
2613 enum machine_mode wider_mode;
2614 rtx x, last;
2616 if (!CLASS_HAS_WIDER_MODES_P (mclass))
2617 return NULL_RTX;
2619 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2620 wider_mode != VOIDmode;
2621 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2622 if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
2623 goto found;
2624 return NULL_RTX;
2626 found:
2627 last = get_last_insn ();
2629 x = widen_operand (op0, wider_mode, mode, true, true);
2630 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2632 gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2633 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2634 if (x != 0)
2635 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2636 GET_MODE_BITSIZE (wider_mode)
2637 - GET_MODE_BITSIZE (mode),
2638 NULL_RTX, true);
2640 if (x != 0)
2642 if (target == 0)
2643 target = gen_reg_rtx (mode);
2644 emit_move_insn (target, gen_lowpart (mode, x));
2646 else
2647 delete_insns_since (last);
2649 return target;
2652 /* Try calculating bswap as two bswaps of two word-sized operands. */
2654 static rtx
2655 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2657 rtx t0, t1;
2659 t1 = expand_unop (word_mode, bswap_optab,
2660 operand_subword_force (op, 0, mode), NULL_RTX, true);
2661 t0 = expand_unop (word_mode, bswap_optab,
2662 operand_subword_force (op, 1, mode), NULL_RTX, true);
2664 if (target == 0 || !valid_multiword_target_p (target))
2665 target = gen_reg_rtx (mode);
2666 if (REG_P (target))
2667 emit_clobber (target);
2668 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2669 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2671 return target;
2674 /* Try calculating (parity x) as (and (popcount x) 1), where
2675 popcount can also be done in a wider mode. */
2676 static rtx
2677 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2679 enum mode_class mclass = GET_MODE_CLASS (mode);
2680 if (CLASS_HAS_WIDER_MODES_P (mclass))
2682 enum machine_mode wider_mode;
2683 for (wider_mode = mode; wider_mode != VOIDmode;
2684 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2686 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2688 rtx xop0, temp, last;
2690 last = get_last_insn ();
2692 if (target == 0)
2693 target = gen_reg_rtx (mode);
2694 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2695 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2696 true);
2697 if (temp != 0)
2698 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2699 target, true, OPTAB_DIRECT);
2700 if (temp == 0)
2701 delete_insns_since (last);
2703 return temp;
2707 return 0;
/* Try calculating ctz(x) as K - clz(x & -x) ,
   where K is GET_MODE_PRECISION(mode) - 1.

   Both __builtin_ctz and __builtin_clz are undefined at zero, so we
   don't have to worry about what the hardware does in that case.  (If
   the clz instruction produces the usual value at 0, which is K, the
   result of this code sequence will be -1; expand_ffs, below, relies
   on this.  It might be nice to have it be K instead, for consistency
   with the (very few) processors that provide a ctz with a defined
   value, but that would take one more instruction, and it would be
   less convenient for expand_ffs anyway.  */

static rtx
expand_ctz (enum machine_mode mode, rtx op0, rtx target)
{
  rtx seq, temp;

  if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
    return 0;

  start_sequence ();

  /* temp = -op0.  */
  temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
  /* temp = op0 & -op0: isolates the lowest set bit.  */
  if (temp)
    temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
			 true, OPTAB_DIRECT);
  /* temp = clz (op0 & -op0).  */
  if (temp)
    temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
  /* temp = K - clz (...): converts the leading-zero count of the
     isolated bit into its index from the low end.  */
  if (temp)
    temp = expand_binop (mode, sub_optab, GEN_INT (GET_MODE_PRECISION (mode) - 1),
			 temp, target,
			 true, OPTAB_DIRECT);
  if (temp == 0)
    {
      /* One of the steps failed; throw the sequence away.  */
      end_sequence ();
      return 0;
    }

  seq = get_insns ();
  end_sequence ();

  /* Tag the sequence with a REG_EQUAL (ctz op0) note on the insn
     setting TEMP, then emit it.  */
  add_equal_note (seq, temp, CTZ, op0, 0);
  emit_insn (seq);
  return temp;
}
/* Try calculating ffs(x) using ctz(x) if we have that instruction, or
   else with the sequence used by expand_clz.

   The ffs builtin promises to return zero for a zero value and ctz/clz
   may have an undefined value in that case.  If they do not give us a
   convenient value, we have to generate a test and branch.  */
static rtx
expand_ffs (enum machine_mode mode, rtx op0, rtx target)
{
  HOST_WIDE_INT val = 0;
  bool defined_at_zero = false;
  rtx temp, seq;

  if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();

      temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
      if (!temp)
	goto fail;

      /* VAL receives the hardware's ctz(0) result, if it defines one.  */
      defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
    }
  else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
    {
      start_sequence ();
      temp = expand_ctz (mode, op0, 0);
      if (!temp)
	goto fail;

      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
	{
	  /* The K - clz(0) identity maps the hardware's clz(0) value
	     through the expand_ctz formula.  */
	  defined_at_zero = true;
	  val = (GET_MODE_PRECISION (mode) - 1) - val;
	}
    }
  else
    return 0;

  if (defined_at_zero && val == -1)
    /* No correction needed at zero: the +1 below already maps it to 0.  */;
  else
    {
      /* We don't try to do anything clever with the situation found
	 on some processors (eg Alpha) where ctz(0:mode) ==
	 bitsize(mode).  If someone can think of a way to send N to -1
	 and leave alone all values in the range 0..N-1 (where N is a
	 power of two), cheaper than this test-and-branch, please add it.

	 The test-and-branch is done after the operation itself, in case
	 the operation sets condition codes that can be recycled for this.
	 (This is true on i386, for instance.)  */

      rtx nonzero_label = gen_label_rtx ();
      emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
			       mode, true, nonzero_label);

      convert_move (temp, GEN_INT (-1), false);
      emit_label (nonzero_label);
    }

  /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
     to produce a value in the range 0..bitsize.  */
  temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
		       target, false, OPTAB_DIRECT);
  if (!temp)
    goto fail;

  seq = get_insns ();
  end_sequence ();

  /* Tag the final insn with a REG_EQUAL (ffs op0) note and emit.  */
  add_equal_note (seq, temp, FFS, op0, 0);
  emit_insn (seq);
  return temp;

 fail:
  /* Abandon the partially-built sequence.  */
  end_sequence ();
  return 0;
}
2837 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2838 conditions, VAL may already be a SUBREG against which we cannot generate
2839 a further SUBREG. In this case, we expect forcing the value into a
2840 register will work around the situation. */
2842 static rtx
2843 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2844 enum machine_mode imode)
2846 rtx ret;
2847 ret = lowpart_subreg (omode, val, imode);
2848 if (ret == NULL)
2850 val = force_reg (imode, val);
2851 ret = lowpart_subreg (omode, val, imode);
2852 gcc_assert (ret != NULL);
2854 return ret;
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit: ABS clears it with AND ~mask,
   NEG flips it with XOR mask.  */

static rtx
expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
		   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  enum machine_mode imode;
  double_int mask;
  rtx temp, insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      /* Single-word case: operate on the whole value in its integer
	 counterpart mode.  */
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      /* Multi-word case: locate the word holding the sign bit and the
	 bit's position within that word.  */
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  /* Mask with only the sign bit set; inverted for ABS (AND keeps all
     other bits, clears the sign).  */
  mask = double_int_setbit (double_int_zero, bitpos);
  if (code == ABS)
    mask = double_int_not (mask);

  if (target == 0
      || target == op0
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      /* Only the sign-carrying word is actually operated on.  */
	      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
				   op0_piece,
				   immed_double_int_const (mask, imode),
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    /* All other words are copied through unchanged.  */
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
			   gen_lowpart (imode, op0),
			   immed_double_int_const (mask, imode),
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      /* Note the float-mode equivalence on the insn that set TEMP.  */
      set_dst_reg_note (get_last_insn (), REG_EQUAL,
			gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
			target);
    }

  return target;
}
/* As expand_unop, but will fail rather than attempt the operation in a
   different mode or with a libcall.  */
static rtx
expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
		    int unsignedp)
{
  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (unoptab, mode);
      rtx last = get_last_insn ();
      rtx pat;

      create_output_operand (&ops[0], target, mode);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      pat = maybe_gen_insn (icode, 2, ops);
      if (pat)
	{
	  /* If the expansion is a multi-insn sequence and we cannot
	     attach a REG_EQUAL note to it, throw it away and redo the
	     whole expansion through expand_unop so the equivalence is
	     not silently lost.  */
	  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	      && ! add_equal_note (pat, ops[0].value, unoptab->code,
				   ops[1].value, NULL_RTX))
	    {
	      delete_insns_since (last);
	      return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
	    }

	  emit_insn (pat);

	  return ops[0].value;
	}
    }
  return 0;
}
2989 /* Generate code to perform an operation specified by UNOPTAB
2990 on operand OP0, with result having machine-mode MODE.
2992 UNSIGNEDP is for the case where we have to widen the operands
2993 to perform the operation. It says to use zero-extension.
2995 If TARGET is nonzero, the value
2996 is generated there, if it is convenient to do so.
2997 In all cases an rtx is returned for the locus of the value;
2998 this may or may not be TARGET. */
3001 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3002 int unsignedp)
3004 enum mode_class mclass = GET_MODE_CLASS (mode);
3005 enum machine_mode wider_mode;
3006 rtx temp;
3007 rtx libfunc;
3009 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
3010 if (temp)
3011 return temp;
3013 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3015 /* Widening (or narrowing) clz needs special treatment. */
3016 if (unoptab == clz_optab)
3018 temp = widen_leading (mode, op0, target, unoptab);
3019 if (temp)
3020 return temp;
3022 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3023 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3025 temp = expand_doubleword_clz (mode, op0, target);
3026 if (temp)
3027 return temp;
3030 goto try_libcall;
3033 if (unoptab == clrsb_optab)
3035 temp = widen_leading (mode, op0, target, unoptab);
3036 if (temp)
3037 return temp;
3038 goto try_libcall;
3041 /* Widening (or narrowing) bswap needs special treatment. */
3042 if (unoptab == bswap_optab)
3044 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
3045 or ROTATERT. First try these directly; if this fails, then try the
3046 obvious pair of shifts with allowed widening, as this will probably
3047 be always more efficient than the other fallback methods. */
3048 if (mode == HImode)
3050 rtx last, temp1, temp2;
3052 if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
3054 temp = expand_binop (mode, rotl_optab, op0, GEN_INT (8), target,
3055 unsignedp, OPTAB_DIRECT);
3056 if (temp)
3057 return temp;
3060 if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
3062 temp = expand_binop (mode, rotr_optab, op0, GEN_INT (8), target,
3063 unsignedp, OPTAB_DIRECT);
3064 if (temp)
3065 return temp;
3068 last = get_last_insn ();
3070 temp1 = expand_binop (mode, ashl_optab, op0, GEN_INT (8), NULL_RTX,
3071 unsignedp, OPTAB_WIDEN);
3072 temp2 = expand_binop (mode, lshr_optab, op0, GEN_INT (8), NULL_RTX,
3073 unsignedp, OPTAB_WIDEN);
3074 if (temp1 && temp2)
3076 temp = expand_binop (mode, ior_optab, temp1, temp2, target,
3077 unsignedp, OPTAB_WIDEN);
3078 if (temp)
3079 return temp;
3082 delete_insns_since (last);
3085 temp = widen_bswap (mode, op0, target);
3086 if (temp)
3087 return temp;
3089 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3090 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3092 temp = expand_doubleword_bswap (mode, op0, target);
3093 if (temp)
3094 return temp;
3097 goto try_libcall;
3100 if (CLASS_HAS_WIDER_MODES_P (mclass))
3101 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3102 wider_mode != VOIDmode;
3103 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3105 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
3107 rtx xop0 = op0;
3108 rtx last = get_last_insn ();
3110 /* For certain operations, we need not actually extend
3111 the narrow operand, as long as we will truncate the
3112 results to the same narrowness. */
3114 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3115 (unoptab == neg_optab
3116 || unoptab == one_cmpl_optab)
3117 && mclass == MODE_INT);
3119 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3120 unsignedp);
3122 if (temp)
3124 if (mclass != MODE_INT
3125 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
3127 if (target == 0)
3128 target = gen_reg_rtx (mode);
3129 convert_move (target, temp, 0);
3130 return target;
3132 else
3133 return gen_lowpart (mode, temp);
3135 else
3136 delete_insns_since (last);
3140 /* These can be done a word at a time. */
3141 if (unoptab == one_cmpl_optab
3142 && mclass == MODE_INT
3143 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
3144 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3146 int i;
3147 rtx insns;
3149 if (target == 0 || target == op0 || !valid_multiword_target_p (target))
3150 target = gen_reg_rtx (mode);
3152 start_sequence ();
3154 /* Do the actual arithmetic. */
3155 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
3157 rtx target_piece = operand_subword (target, i, 1, mode);
3158 rtx x = expand_unop (word_mode, unoptab,
3159 operand_subword_force (op0, i, mode),
3160 target_piece, unsignedp);
3162 if (target_piece != x)
3163 emit_move_insn (target_piece, x);
3166 insns = get_insns ();
3167 end_sequence ();
3169 emit_insn (insns);
3170 return target;
3173 if (unoptab->code == NEG)
3175 /* Try negating floating point values by flipping the sign bit. */
3176 if (SCALAR_FLOAT_MODE_P (mode))
3178 temp = expand_absneg_bit (NEG, mode, op0, target);
3179 if (temp)
3180 return temp;
3183 /* If there is no negation pattern, and we have no negative zero,
3184 try subtracting from zero. */
3185 if (!HONOR_SIGNED_ZEROS (mode))
3187 temp = expand_binop (mode, (unoptab == negv_optab
3188 ? subv_optab : sub_optab),
3189 CONST0_RTX (mode), op0, target,
3190 unsignedp, OPTAB_DIRECT);
3191 if (temp)
3192 return temp;
3196 /* Try calculating parity (x) as popcount (x) % 2. */
3197 if (unoptab == parity_optab)
3199 temp = expand_parity (mode, op0, target);
3200 if (temp)
3201 return temp;
3204 /* Try implementing ffs (x) in terms of clz (x). */
3205 if (unoptab == ffs_optab)
3207 temp = expand_ffs (mode, op0, target);
3208 if (temp)
3209 return temp;
3212 /* Try implementing ctz (x) in terms of clz (x). */
3213 if (unoptab == ctz_optab)
3215 temp = expand_ctz (mode, op0, target);
3216 if (temp)
3217 return temp;
3220 try_libcall:
3221 /* Now try a library call in this mode. */
3222 libfunc = optab_libfunc (unoptab, mode);
3223 if (libfunc)
3225 rtx insns;
3226 rtx value;
3227 rtx eq_value;
3228 enum machine_mode outmode = mode;
3230 /* All of these functions return small values. Thus we choose to
3231 have them return something that isn't a double-word. */
3232 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3233 || unoptab == clrsb_optab || unoptab == popcount_optab
3234 || unoptab == parity_optab)
3235 outmode
3236 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
3237 optab_libfunc (unoptab, mode)));
3239 start_sequence ();
3241 /* Pass 1 for NO_QUEUE so we don't lose any increments
3242 if the libcall is cse'd or moved. */
3243 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3244 1, op0, mode);
3245 insns = get_insns ();
3246 end_sequence ();
3248 target = gen_reg_rtx (outmode);
3249 eq_value = gen_rtx_fmt_e (unoptab->code, mode, op0);
3250 if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
3251 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3252 else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
3253 eq_value = simplify_gen_unary (ZERO_EXTEND, outmode, eq_value, mode);
3254 emit_libcall_block_1 (insns, target, value, eq_value,
3255 trapv_unoptab_p (unoptab));
3257 return target;
3260 /* It can't be done in this mode. Can we do it in a wider mode? */
3262 if (CLASS_HAS_WIDER_MODES_P (mclass))
3264 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3265 wider_mode != VOIDmode;
3266 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3268 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
3269 || optab_libfunc (unoptab, wider_mode))
3271 rtx xop0 = op0;
3272 rtx last = get_last_insn ();
3274 /* For certain operations, we need not actually extend
3275 the narrow operand, as long as we will truncate the
3276 results to the same narrowness. */
3277 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3278 (unoptab == neg_optab
3279 || unoptab == one_cmpl_optab
3280 || unoptab == bswap_optab)
3281 && mclass == MODE_INT);
3283 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3284 unsignedp);
3286 /* If we are generating clz using wider mode, adjust the
3287 result. Similarly for clrsb. */
3288 if ((unoptab == clz_optab || unoptab == clrsb_optab)
3289 && temp != 0)
3290 temp = expand_binop (wider_mode, sub_optab, temp,
3291 GEN_INT (GET_MODE_PRECISION (wider_mode)
3292 - GET_MODE_PRECISION (mode)),
3293 target, true, OPTAB_DIRECT);
3295 /* Likewise for bswap. */
3296 if (unoptab == bswap_optab && temp != 0)
3298 gcc_assert (GET_MODE_PRECISION (wider_mode)
3299 == GET_MODE_BITSIZE (wider_mode)
3300 && GET_MODE_PRECISION (mode)
3301 == GET_MODE_BITSIZE (mode));
3303 temp = expand_shift (RSHIFT_EXPR, wider_mode, temp,
3304 GET_MODE_BITSIZE (wider_mode)
3305 - GET_MODE_BITSIZE (mode),
3306 NULL_RTX, true);
3309 if (temp)
3311 if (mclass != MODE_INT)
3313 if (target == 0)
3314 target = gen_reg_rtx (mode);
3315 convert_move (target, temp, 0);
3316 return target;
3318 else
3319 return gen_lowpart (mode, temp);
3321 else
3322 delete_insns_since (last);
3327 /* One final attempt at implementing negation via subtraction,
3328 this time allowing widening of the operand. */
3329 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
3331 rtx temp;
3332 temp = expand_binop (mode,
3333 unoptab == negv_optab ? subv_optab : sub_optab,
3334 CONST0_RTX (mode), op0,
3335 target, unsignedp, OPTAB_LIB_WIDEN);
3336 if (temp)
3337 return temp;
3340 return 0;
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.

   This variant never emits a jump: it tries, in order, a native abs
   insn, sign-bit clearing for float modes, MAX (x, -x), and the
   branchless (x ^ (x >> W-1)) - (x >> W-1) sequence for integers.
   Returns NULL_RTX if none of these strategies applies.  */

rtx
expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
		   int result_unsignedp)
{
  rtx temp;

  /* Overflow trapping on negation only matters with -ftrapv; otherwise
     treat the result as unsigned so the plain optabs are used.  */
  if (! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
		      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  if (SCALAR_FLOAT_MODE_P (mode))
    {
      temp = expand_absneg_bit (ABS, mode, op0, target);
      if (temp)
	return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).
     Not valid when signed zeros must be honored: MAX (-0.0, +0.0)
     need not pick +0.0.  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx last = get_last_insn ();

      temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      /* Throw away the partially emitted sequence on failure.  */
      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && BRANCH_COST (optimize_insn_for_speed_p (),
		      false) >= 2)
    {
      /* EXTENDED is all-ones when OP0 is negative, zero otherwise.  */
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
				   GET_MODE_PRECISION (mode) - 1,
				   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);
      if (temp != 0)
	temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
			     temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  return NULL_RTX;
}
/* Emit code to compute the absolute value of OP0, like
   expand_abs_nojump, but falling back to a compare-and-branch with
   conditional negation when no branchless expansion is available.
   SAFE nonzero means it is safe to clobber TARGET before the compare
   (e.g. it is not aliased by OP0's address).  Always succeeds.  */

rtx
expand_abs (enum machine_mode mode, rtx op0, rtx target,
	    int result_unsignedp, int safe)
{
  /* NOTE: despite the name, OP1 holds the branch label, not an operand.  */
  rtx temp, op1;

  if (! flag_trapv)
    result_unsignedp = 1;

  /* Prefer any of the jump-free strategies.  */
  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register.  */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
	  && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);
  NO_DEFER_POP;

  /* Skip the negation when OP0 >= 0.  */
  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
			   NULL_RTX, NULL_RTX, op1, -1);

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
		     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);
  emit_label (op1);
  OK_DEFER_POP;
  return target;
}
/* Emit code to compute the one's complement absolute value of OP0
   (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
   (TARGET may be NULL_RTX.)  The return value says where the result
   actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.  Returns NULL_RTX if no
   jump-free expansion is available.  */

rtx
expand_one_cmpl_abs_nojump (enum machine_mode mode, rtx op0, rtx target)
{
  rtx temp;

  /* Not applicable for floating point modes.  */
  if (FLOAT_MODE_P (mode))
    return NULL_RTX;

  /* If we have a MAX insn, we can do this as MAX (x, ~x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
    {
      rtx last = get_last_insn ();

      temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      /* Discard the partial sequence on failure.  */
      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do one's complement
     absolute value of X as (((signed) x >> (W-1)) ^ x).  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && BRANCH_COST (optimize_insn_for_speed_p (),
		      false) >= 2)
    {
      /* EXTENDED is all-ones when OP0 is negative, zero otherwise,
	 so the XOR complements exactly the negative values.  */
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
				   GET_MODE_PRECISION (mode) - 1,
				   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  return NULL_RTX;
}
/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.

   BITPOS is the bit position of the sign bit in MODE's representation;
   OP0_IS_ABS is true when OP0 is already known non-negative.  Computes
   abs(OP0), then conditionally negates it when OP1's sign bit is set.
   Returns NULL_RTX on failure.  */

static rtx
expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
			int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  enum insn_code icode;
  rtx sign, label;

  /* TARGET will be overwritten before OP1's sign is consumed only if
     they are distinct.  */
  if (target == op1)
    target = NULL_RTX;

  /* Check if the back end provides an insn that handles signbit for the
     argument's mode.  */
  icode = optab_handler (signbit_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      imode = insn_data[(int) icode].operand[0].mode;
      sign = gen_reg_rtx (imode);
      emit_unop_insn (icode, sign, op1, UNKNOWN);
    }
  else
    {
      /* Extract OP1's sign bit by masking in an integer mode.  */
      double_int mask;

      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
	{
	  imode = int_mode_for_mode (mode);
	  if (imode == BLKmode)
	    return NULL_RTX;
	  op1 = gen_lowpart (imode, op1);
	}
      else
	{
	  /* Multi-word value: operate only on the word holding the
	     sign bit, and rebase BITPOS within that word.  */
	  int word;

	  imode = word_mode;
	  if (FLOAT_WORDS_BIG_ENDIAN)
	    word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
	  else
	    word = bitpos / BITS_PER_WORD;
	  bitpos = bitpos % BITS_PER_WORD;
	  op1 = operand_subword_force (op1, word, mode);
	}

      mask = double_int_setbit (double_int_zero, bitpos);

      sign = expand_binop (imode, and_optab, op1,
			   immed_double_int_const (mask, imode),
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
    }

  if (!op0_is_abs)
    {
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
	return NULL_RTX;
      target = op0;
    }
  else
    {
      if (target == NULL_RTX)
	target = copy_to_reg (op0);
      else
	emit_move_insn (target, op0);
    }

  /* Negate the (non-negative) value unless OP1's sign bit is clear.  */
  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (GET_CODE (op0) == CONST_DOUBLE)
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}
/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.

   Computes (OP0 & ~signmask) | (OP1 & signmask), working a word at a
   time when MODE is wider than a word.  */

static rtx
expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
		     int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  double_int mask;
  int word, nwords, i;
  rtx temp, insns;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      /* WORD is the index of the word containing the sign bit;
	 BITPOS is rebased to be relative to that word.  */
      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  mask = double_int_setbit (double_int_zero, bitpos);

  /* We need a fresh target if the given one overlaps an input or is
     unsuitable for piecewise word access.  */
  if (target == 0
      || target == op0
      || target == op1
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      /* This word holds the sign bit: merge OP1's sign into
		 OP0's magnitude.  */
	      if (!op0_is_abs)
		op0_piece
		  = expand_binop (imode, and_optab, op0_piece,
				  immed_double_int_const (double_int_not (mask),
							  imode),
				  NULL_RTX, 1, OPTAB_LIB_WIDEN);

	      op1 = expand_binop (imode, and_optab,
				  operand_subword_force (op1, i, mode),
				  immed_double_int_const (mask, imode),
				  NULL_RTX, 1, OPTAB_LIB_WIDEN);

	      temp = expand_binop (imode, ior_optab, op0_piece, op1,
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    /* All other words are copied from OP0 unchanged.  */
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
			  immed_double_int_const (mask, imode),
			  NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
	op0 = expand_binop (imode, and_optab, op0,
			    immed_double_int_const (double_int_not (mask),
						    imode),
			    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.

   Strategy: native copysign insn first, then abs/neg-based expansion
   when the format supports read-only sign-bit access, finally direct
   sign-bit manipulation with integer masks.  */

rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  enum machine_mode mode = GET_MODE (op0);
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
		       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  /* Bit-twiddling only makes sense for formats with a sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  /* A constant magnitude can be made non-negative at compile time.  */
  op0_is_abs = false;
  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
	op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  if (fmt->signbit_ro >= 0
      && (GET_CODE (op0) == CONST_DOUBLE
	  || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
	      && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
				     fmt->signbit_ro, op0_is_abs);
      if (temp)
	return temp;
    }

  /* Fall back to explicit mask-and-or on the representation; this
     needs a writable sign-bit position.  */
  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
			      fmt->signbit_rw, op0_is_abs);
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.

   Return false if expansion failed.  */

bool
maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
		      enum rtx_code code)
{
  struct expand_operand ops[2];
  rtx pat;

  create_output_operand (&ops[0], target, GET_MODE (target));
  create_input_operand (&ops[1], op0, GET_MODE (op0));
  pat = maybe_gen_insn (icode, 2, ops);
  if (!pat)
    return false;

  /* If the expansion produced several insns, attach a REG_EQUAL note
     describing the combined effect to the insn setting the result.  */
  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
    add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);

  emit_insn (pat);

  /* The expander may have legitimized the output into a different
     place; copy it back to the requested TARGET.  */
  if (ops[0].value != target)
    emit_move_insn (target, ops[0].value);
  return true;
}
/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.

   Unlike maybe_emit_unop_insn, failure to expand is a fatal internal
   error here.  */

void
emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
{
  bool ok = maybe_emit_unop_insn (icode, target, op0, code);
  gcc_assert (ok);
}
/* Communication block between emit_libcall_block_1 and its
   note_stores callback no_conflict_move_test.  TARGET is the libcall
   result register; FIRST is the first insn of the libcall sequence;
   INSN is the insn currently being examined for hoisting out of the
   sequence; MUST_STAY is set by the callback when INSN cannot be
   moved.  */
struct no_conflict_data
{
  rtx target, first, insn;
  bool must_stay;
};
/* Called via note_stores by emit_libcall_block.  Set P->must_stay if
   the currently examined clobber / store has to stay in the list of
   insns that constitute the actual libcall block.  DEST is the
   location stored to, SET the SET/CLOBBER rtx doing it, and P0 points
   to a struct no_conflict_data.  */
static void
no_conflict_move_test (rtx dest, const_rtx set, void *p0)
{
  struct no_conflict_data *p= (struct no_conflict_data *) p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    return;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
	   || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
	   || reg_used_between_p (dest, p->first, p->insn)
	   /* Likewise if this insn depends on a register set by a previous
	      insn in the list, or if it sets a result (presumably a hard
	      register) that is set or clobbered by a previous insn.
	      N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
	      SET_DEST perform the former check on the address, and the latter
	      check on the MEM.  */
	   || (GET_CODE (set) == SET
	       && (modified_in_p (SET_SRC (set), p->first)
		   || modified_in_p (SET_DEST (set), p->first)
		   || modified_between_p (SET_SRC (set), p->first, p->insn)
		   || modified_between_p (SET_DEST (set), p->first, p->insn))))
    p->must_stay = true;
}
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our block is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.

   EQUIV_MAY_TRAP forces the EQUIV expression to be treated as trapping
   even when may_trap_p says otherwise.  */

static void
emit_libcall_block_1 (rtx insns, rtx target, rtx result, rtx equiv,
		      bool equiv_may_trap)
{
  rtx final_dest = target;
  rtx next, last, insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  /* ??? See the comment in front of make_reg_eh_region_note.  */
  if (cfun->can_throw_non_call_exceptions
      && (equiv_may_trap || may_trap_p (equiv)))
    {
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  {
	    rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
	    if (note)
	      {
		/* Drop notes claiming "cannot throw" so that the call
		   keeps its ability to trap.  */
		int lp_nr = INTVAL (XEXP (note, 0));
		if (lp_nr == 0 || lp_nr == INT_MIN)
		  remove_note (insn, note);
	      }
	  }
    }
  else
    {
      /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
	 reg note to indicate that this call cannot throw or execute a nonlocal
	 goto (unless there is already a REG_EH_REGION note, in which case
	 we update it).  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  make_reg_eh_region_note_nothrow_nononlocal (insn);
    }

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	{
	  struct no_conflict_data data;

	  data.target = const0_rtx;
	  data.first = insns;
	  data.insn = insn;
	  data.must_stay = 0;
	  note_stores (PATTERN (insn), no_conflict_move_test, &data);
	  if (! data.must_stay)
	    {
	      /* Unlink INSN from the pending list and emit it now,
		 ahead of the libcall block proper.  */
	      if (PREV_INSN (insn))
		NEXT_INSN (PREV_INSN (insn)) = next;
	      else
		insns = next;

	      if (next)
		PREV_INSN (next) = PREV_INSN (insn);

	      add_insn (insn);
	    }
	}

      /* Some ports use a loop to copy large arguments onto the stack.
	 Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
	break;
    }

  /* Write the remaining insns followed by the final copy.  */
  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);

  if (final_dest != target)
    emit_move_insn (final_dest, target);
}
/* Public wrapper for emit_libcall_block_1 with EQUIV assumed
   non-trapping unless may_trap_p says otherwise.  */
void
emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
{
  emit_libcall_block_1 (insns, target, result, equiv, false);
}
/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.

   Widening the mode is considered acceptable: the loop below walks the
   chain of wider modes looking for any that the target handles.  */

int
can_compare_p (enum rtx_code code, enum machine_mode mode,
	       enum can_compare_purpose purpose)
{
  rtx test;
  /* Build a dummy comparison rtx just to probe the insn predicates.  */
  test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
  do
    {
      enum insn_code icode;

      if (purpose == ccp_jump
	  && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
	  && insn_operand_matches (icode, 0, test))
	return 1;
      if (purpose == ccp_store_flag
	  && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
	  && insn_operand_matches (icode, 1, test))
	return 1;
      if (purpose == ccp_cmov
	  && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
	return 1;

      mode = GET_MODE_WIDER_MODE (mode);
      PUT_MODE (test, mode);
    }
  while (mode != VOIDmode);

  return 0;
}
/* This function is called when we are going to emit a compare instruction that
   compares the values found in *PX and *PY, using the rtl operator COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened (as given by METHODS).

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.

   On success *PTEST receives the comparison rtx to use and *PMODE the
   mode it should be emitted in; on failure *PTEST is set to NULL_RTX.  */

static void
prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
		  int unsignedp, enum optab_methods methods,
		  rtx *ptest, enum machine_mode *pmode)
{
  enum machine_mode mode = *pmode;
  rtx libfunc, test;
  enum machine_mode cmp_mode;
  enum mode_class mclass;

  /* The other methods are not needed.  */
  gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
	      || methods == OPTAB_LIB_WIDEN);

  /* If we are optimizing, force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && (rtx_cost (x, COMPARE, 0, optimize_insn_for_speed_p ())
	  > COSTS_N_INSNS (1)))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && (rtx_cost (y, COMPARE, 1, optimize_insn_for_speed_p ())
	  > COSTS_N_INSNS (1)))
    y = force_reg (mode, y);

#ifdef HAVE_cc0
  /* Make sure we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
#endif

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);
  if (mode == VOIDmode)
    mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      enum machine_mode result_mode;
      enum insn_code cmp_code;
      tree length_type;
      rtx libfunc;
      rtx result;
      rtx opalign
	= GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      gcc_assert (size);

      /* Try to use a memory block compare insn - either cmpstr
	 or cmpmem will do.  */
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
	   cmp_mode != VOIDmode;
	   cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
	{
	  cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    continue;

	  /* Must make sure the size fits the insn's mode.  */
	  if ((CONST_INT_P (size)
	       && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
	      || (GET_MODE_BITSIZE (GET_MODE (size))
		  > GET_MODE_BITSIZE (cmp_mode)))
	    continue;

	  result_mode = insn_data[cmp_code].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  size = convert_to_mode (cmp_mode, size, 1);
	  emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

	  *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
	  *pmode = result_mode;
	  return;
	}

      if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
	goto fail;

      /* Otherwise call a library function, memcmp.  */
      libfunc = memcmp_libfunc;
      length_type = sizetype;
      result_mode = TYPE_MODE (integer_type_node);
      cmp_mode = TYPE_MODE (length_type);
      size = convert_to_mode (TYPE_MODE (length_type), size,
			      TYPE_UNSIGNED (length_type));

      result = emit_library_call_value (libfunc, 0, LCT_PURE,
					result_mode, 3,
					XEXP (x, 0), Pmode,
					XEXP (y, 0), Pmode,
					size, cmp_mode);

      *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
      *pmode = result_mode;
      return;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (cfun->can_throw_non_call_exceptions)
    {
      if (may_trap_p (x))
	x = force_reg (mode, x);
      if (may_trap_p (y))
	y = force_reg (mode, y);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      gcc_assert (can_compare_p (comparison, CCmode, ccp_jump));
      *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
      return;
    }

  /* Walk CMP_MODE through wider modes until a cbranch pattern accepts
     the (possibly widened) operands.  */
  mclass = GET_MODE_CLASS (mode);
  test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
  cmp_mode = mode;
  do
    {
      enum insn_code icode;
      icode = optab_handler (cbranch_optab, cmp_mode);
      if (icode != CODE_FOR_nothing
	  && insn_operand_matches (icode, 0, test))
	{
	  rtx last = get_last_insn ();
	  rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
	  rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
	  if (op0 && op1
	      && insn_operand_matches (icode, 1, op0)
	      && insn_operand_matches (icode, 2, op1))
	    {
	      XEXP (test, 0) = op0;
	      XEXP (test, 1) = op1;
	      *ptest = test;
	      *pmode = cmp_mode;
	      return;
	    }
	  /* Operand preparation emitted insns we no longer need.  */
	  delete_insns_since (last);
	}

      if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
	break;
      cmp_mode = GET_MODE_WIDER_MODE (cmp_mode);
    }
  while (cmp_mode != VOIDmode);

  if (methods != OPTAB_LIB_WIDEN)
    goto fail;

  if (!SCALAR_FLOAT_MODE_P (mode))
    {
      rtx result;
      enum machine_mode ret_mode;

      /* Handle a libcall just for the mode we are using.  */
      libfunc = optab_libfunc (cmp_optab, mode);
      gcc_assert (libfunc);

      /* If we want unsigned, and this mode has a distinct unsigned
	 comparison routine, use that.  */
      if (unsignedp)
	{
	  rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
	  if (ulibfunc)
	    libfunc = ulibfunc;
	}

      ret_mode = targetm.libgcc_cmp_return_mode ();
      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
					ret_mode, 2, x, mode, y, mode);

      /* There are two kinds of comparison routines.  Biased routines
	 return 0/1/2, and unbiased routines return -1/0/1.  Other parts
	 of gcc expect that the comparison operation is equivalent
	 to the modified comparison.  For signed comparisons compare the
	 result against 1 in the biased case, and zero in the unbiased
	 case.  For unsigned comparisons always compare against 1 after
	 biasing the unbiased result by adding 1.  This gives us a way to
	 represent LTU.
	 The comparisons in the fixed-point helper library are always
	 biased.  */
      x = result;
      y = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
	{
	  if (unsignedp)
	    x = plus_constant (ret_mode, result, 1);
	  else
	    y = const0_rtx;
	}

      /* Recurse to emit the comparison of the libcall's integer result.  */
      *pmode = word_mode;
      prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
			ptest, pmode);
    }
  else
    prepare_float_lib_cmp (x, y, comparison, ptest, pmode);

  return;

 fail:
  *ptest = NULL_RTX;
}
/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value,
   or NULL_RTX if the operand cannot be legitimized after reload.  */

rtx
prepare_operand (enum insn_code icode, rtx x, int opnum, enum machine_mode mode,
		 enum machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_operand_matches (icode, opnum, x))
    {
      /* After reload no new pseudos may be created.  */
      if (reload_completed)
	return NULL_RTX;
      x = copy_to_mode_reg (insn_data[(int) icode].operand[opnum].mode, x);
    }

  return x;
}
4241 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4242 we can do the branch. */
4244 static void
4245 emit_cmp_and_jump_insn_1 (rtx test, enum machine_mode mode, rtx label)
4247 enum machine_mode optab_mode;
4248 enum mode_class mclass;
4249 enum insn_code icode;
4251 mclass = GET_MODE_CLASS (mode);
4252 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
4253 icode = optab_handler (cbranch_optab, optab_mode);
4255 gcc_assert (icode != CODE_FOR_nothing);
4256 gcc_assert (insn_operand_matches (icode, 0, test));
4257 emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0), XEXP (test, 1), label));
4260 /* Generate code to compare X with Y so that the condition codes are
4261 set and to jump to LABEL if the condition is true. If X is a
4262 constant and Y is not a constant, then the comparison is swapped to
4263 ensure that the comparison RTL has the canonical form.
4265 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4266 need to be widened. UNSIGNEDP is also used to select the proper
4267 branch condition code.
4269 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4271 MODE is the mode of the inputs (in case they are const_int).
4273 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4274 It will be potentially converted into an unsigned variant based on
4275 UNSIGNEDP to select a proper jump instruction. */
4277 void
4278 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4279 enum machine_mode mode, int unsignedp, rtx label)
4281 rtx op0 = x, op1 = y;
4282 rtx test;
4284 /* Swap operands and condition to ensure canonical RTL. */
4285 if (swap_commutative_operands_p (x, y)
4286 && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4288 op0 = y, op1 = x;
4289 comparison = swap_condition (comparison);
4292 /* If OP0 is still a constant, then both X and Y must be constants
4293 or the opposite comparison is not supported. Force X into a register
4294 to create canonical RTL. */
4295 if (CONSTANT_P (op0))
4296 op0 = force_reg (mode, op0);
4298 if (unsignedp)
4299 comparison = unsigned_condition (comparison);
4301 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4302 &test, &mode);
4303 emit_cmp_and_jump_insn_1 (test, mode, label);
4307 /* Emit a library call comparison between floating point X and Y.
4308 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4310 static void
4311 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4312 rtx *ptest, enum machine_mode *pmode)
4314 enum rtx_code swapped = swap_condition (comparison);
4315 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4316 enum machine_mode orig_mode = GET_MODE (x);
4317 enum machine_mode mode, cmp_mode;
4318 rtx true_rtx, false_rtx;
4319 rtx value, target, insns, equiv;
4320 rtx libfunc = 0;
4321 bool reversed_p = false;
4322 cmp_mode = targetm.libgcc_cmp_return_mode ();
4324 for (mode = orig_mode;
4325 mode != VOIDmode;
4326 mode = GET_MODE_WIDER_MODE (mode))
4328 if (code_to_optab[comparison]
4329 && (libfunc = optab_libfunc (code_to_optab[comparison], mode)))
4330 break;
4332 if (code_to_optab[swapped]
4333 && (libfunc = optab_libfunc (code_to_optab[swapped], mode)))
4335 rtx tmp;
4336 tmp = x; x = y; y = tmp;
4337 comparison = swapped;
4338 break;
4341 if (code_to_optab[reversed]
4342 && (libfunc = optab_libfunc (code_to_optab[reversed], mode)))
4344 comparison = reversed;
4345 reversed_p = true;
4346 break;
4350 gcc_assert (mode != VOIDmode);
4352 if (mode != orig_mode)
4354 x = convert_to_mode (mode, x, 0);
4355 y = convert_to_mode (mode, y, 0);
4358 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4359 the RTL. The allows the RTL optimizers to delete the libcall if the
4360 condition can be determined at compile-time. */
4361 if (comparison == UNORDERED
4362 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4364 true_rtx = const_true_rtx;
4365 false_rtx = const0_rtx;
4367 else
4369 switch (comparison)
4371 case EQ:
4372 true_rtx = const0_rtx;
4373 false_rtx = const_true_rtx;
4374 break;
4376 case NE:
4377 true_rtx = const_true_rtx;
4378 false_rtx = const0_rtx;
4379 break;
4381 case GT:
4382 true_rtx = const1_rtx;
4383 false_rtx = const0_rtx;
4384 break;
4386 case GE:
4387 true_rtx = const0_rtx;
4388 false_rtx = constm1_rtx;
4389 break;
4391 case LT:
4392 true_rtx = constm1_rtx;
4393 false_rtx = const0_rtx;
4394 break;
4396 case LE:
4397 true_rtx = const0_rtx;
4398 false_rtx = const1_rtx;
4399 break;
4401 default:
4402 gcc_unreachable ();
4406 if (comparison == UNORDERED)
4408 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4409 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4410 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4411 temp, const_true_rtx, equiv);
4413 else
4415 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4416 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4417 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4418 equiv, true_rtx, false_rtx);
4421 start_sequence ();
4422 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4423 cmp_mode, 2, x, mode, y, mode);
4424 insns = get_insns ();
4425 end_sequence ();
4427 target = gen_reg_rtx (cmp_mode);
4428 emit_libcall_block (insns, target, value, equiv);
4430 if (comparison == UNORDERED
4431 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4432 || reversed_p)
4433 *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4434 else
4435 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4437 *pmode = cmp_mode;
4440 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4442 void
4443 emit_indirect_jump (rtx loc)
4445 struct expand_operand ops[1];
4447 create_address_operand (&ops[0], loc);
4448 expand_jump_insn (CODE_FOR_indirect_jump, 1, ops);
4449 emit_barrier ();
4452 #ifdef HAVE_conditional_move
4454 /* Emit a conditional move instruction if the machine supports one for that
4455 condition and machine mode.
4457 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4458 the mode to use should they be constants. If it is VOIDmode, they cannot
4459 both be constants.
4461 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4462 should be stored there. MODE is the mode to use should they be constants.
4463 If it is VOIDmode, they cannot both be constants.
4465 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4466 is not supported. */
4469 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4470 enum machine_mode cmode, rtx op2, rtx op3,
4471 enum machine_mode mode, int unsignedp)
4473 rtx tem, comparison, last;
4474 enum insn_code icode;
4475 enum rtx_code reversed;
4477 /* If one operand is constant, make it the second one. Only do this
4478 if the other operand is not constant as well. */
4480 if (swap_commutative_operands_p (op0, op1))
4482 tem = op0;
4483 op0 = op1;
4484 op1 = tem;
4485 code = swap_condition (code);
4488 /* get_condition will prefer to generate LT and GT even if the old
4489 comparison was against zero, so undo that canonicalization here since
4490 comparisons against zero are cheaper. */
4491 if (code == LT && op1 == const1_rtx)
4492 code = LE, op1 = const0_rtx;
4493 else if (code == GT && op1 == constm1_rtx)
4494 code = GE, op1 = const0_rtx;
4496 if (cmode == VOIDmode)
4497 cmode = GET_MODE (op0);
4499 if (swap_commutative_operands_p (op2, op3)
4500 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4501 != UNKNOWN))
4503 tem = op2;
4504 op2 = op3;
4505 op3 = tem;
4506 code = reversed;
4509 if (mode == VOIDmode)
4510 mode = GET_MODE (op2);
4512 icode = direct_optab_handler (movcc_optab, mode);
4514 if (icode == CODE_FOR_nothing)
4515 return 0;
4517 if (!target)
4518 target = gen_reg_rtx (mode);
4520 code = unsignedp ? unsigned_condition (code) : code;
4521 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4523 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4524 return NULL and let the caller figure out how best to deal with this
4525 situation. */
4526 if (!COMPARISON_P (comparison))
4527 return NULL_RTX;
4529 do_pending_stack_adjust ();
4530 last = get_last_insn ();
4531 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4532 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4533 &comparison, &cmode);
4534 if (comparison)
4536 struct expand_operand ops[4];
4538 create_output_operand (&ops[0], target, mode);
4539 create_fixed_operand (&ops[1], comparison);
4540 create_input_operand (&ops[2], op2, mode);
4541 create_input_operand (&ops[3], op3, mode);
4542 if (maybe_expand_insn (icode, 4, ops))
4544 if (ops[0].value != target)
4545 convert_move (target, ops[0].value, false);
4546 return target;
4549 delete_insns_since (last);
4550 return NULL_RTX;
4553 /* Return nonzero if a conditional move of mode MODE is supported.
4555 This function is for combine so it can tell whether an insn that looks
4556 like a conditional move is actually supported by the hardware. If we
4557 guess wrong we lose a bit on optimization, but that's it. */
4558 /* ??? sparc64 supports conditionally moving integers values based on fp
4559 comparisons, and vice versa. How do we handle them? */
4562 can_conditionally_move_p (enum machine_mode mode)
4564 if (direct_optab_handler (movcc_optab, mode) != CODE_FOR_nothing)
4565 return 1;
4567 return 0;
4570 #endif /* HAVE_conditional_move */
4572 /* Emit a conditional addition instruction if the machine supports one for that
4573 condition and machine mode.
4575 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4576 the mode to use should they be constants. If it is VOIDmode, they cannot
4577 both be constants.
4579 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4580 should be stored there. MODE is the mode to use should they be constants.
4581 If it is VOIDmode, they cannot both be constants.
4583 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4584 is not supported. */
4587 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4588 enum machine_mode cmode, rtx op2, rtx op3,
4589 enum machine_mode mode, int unsignedp)
4591 rtx tem, comparison, last;
4592 enum insn_code icode;
4593 enum rtx_code reversed;
4595 /* If one operand is constant, make it the second one. Only do this
4596 if the other operand is not constant as well. */
4598 if (swap_commutative_operands_p (op0, op1))
4600 tem = op0;
4601 op0 = op1;
4602 op1 = tem;
4603 code = swap_condition (code);
4606 /* get_condition will prefer to generate LT and GT even if the old
4607 comparison was against zero, so undo that canonicalization here since
4608 comparisons against zero are cheaper. */
4609 if (code == LT && op1 == const1_rtx)
4610 code = LE, op1 = const0_rtx;
4611 else if (code == GT && op1 == constm1_rtx)
4612 code = GE, op1 = const0_rtx;
4614 if (cmode == VOIDmode)
4615 cmode = GET_MODE (op0);
4617 if (swap_commutative_operands_p (op2, op3)
4618 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4619 != UNKNOWN))
4621 tem = op2;
4622 op2 = op3;
4623 op3 = tem;
4624 code = reversed;
4627 if (mode == VOIDmode)
4628 mode = GET_MODE (op2);
4630 icode = optab_handler (addcc_optab, mode);
4632 if (icode == CODE_FOR_nothing)
4633 return 0;
4635 if (!target)
4636 target = gen_reg_rtx (mode);
4638 code = unsignedp ? unsigned_condition (code) : code;
4639 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4641 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4642 return NULL and let the caller figure out how best to deal with this
4643 situation. */
4644 if (!COMPARISON_P (comparison))
4645 return NULL_RTX;
4647 do_pending_stack_adjust ();
4648 last = get_last_insn ();
4649 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4650 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4651 &comparison, &cmode);
4652 if (comparison)
4654 struct expand_operand ops[4];
4656 create_output_operand (&ops[0], target, mode);
4657 create_fixed_operand (&ops[1], comparison);
4658 create_input_operand (&ops[2], op2, mode);
4659 create_input_operand (&ops[3], op3, mode);
4660 if (maybe_expand_insn (icode, 4, ops))
4662 if (ops[0].value != target)
4663 convert_move (target, ops[0].value, false);
4664 return target;
4667 delete_insns_since (last);
4668 return NULL_RTX;
4671 /* These functions attempt to generate an insn body, rather than
4672 emitting the insn, but if the gen function already emits them, we
4673 make no attempt to turn them back into naked patterns. */
4675 /* Generate and return an insn body to add Y to X. */
4678 gen_add2_insn (rtx x, rtx y)
4680 enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
4682 gcc_assert (insn_operand_matches (icode, 0, x));
4683 gcc_assert (insn_operand_matches (icode, 1, x));
4684 gcc_assert (insn_operand_matches (icode, 2, y));
4686 return GEN_FCN (icode) (x, x, y);
4689 /* Generate and return an insn body to add r1 and c,
4690 storing the result in r0. */
4693 gen_add3_insn (rtx r0, rtx r1, rtx c)
4695 enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
4697 if (icode == CODE_FOR_nothing
4698 || !insn_operand_matches (icode, 0, r0)
4699 || !insn_operand_matches (icode, 1, r1)
4700 || !insn_operand_matches (icode, 2, c))
4701 return NULL_RTX;
4703 return GEN_FCN (icode) (r0, r1, c);
4707 have_add2_insn (rtx x, rtx y)
4709 enum insn_code icode;
4711 gcc_assert (GET_MODE (x) != VOIDmode);
4713 icode = optab_handler (add_optab, GET_MODE (x));
4715 if (icode == CODE_FOR_nothing)
4716 return 0;
4718 if (!insn_operand_matches (icode, 0, x)
4719 || !insn_operand_matches (icode, 1, x)
4720 || !insn_operand_matches (icode, 2, y))
4721 return 0;
4723 return 1;
4726 /* Generate and return an insn body to subtract Y from X. */
4729 gen_sub2_insn (rtx x, rtx y)
4731 enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
4733 gcc_assert (insn_operand_matches (icode, 0, x));
4734 gcc_assert (insn_operand_matches (icode, 1, x));
4735 gcc_assert (insn_operand_matches (icode, 2, y));
4737 return GEN_FCN (icode) (x, x, y);
4740 /* Generate and return an insn body to subtract r1 and c,
4741 storing the result in r0. */
4744 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4746 enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
4748 if (icode == CODE_FOR_nothing
4749 || !insn_operand_matches (icode, 0, r0)
4750 || !insn_operand_matches (icode, 1, r1)
4751 || !insn_operand_matches (icode, 2, c))
4752 return NULL_RTX;
4754 return GEN_FCN (icode) (r0, r1, c);
4758 have_sub2_insn (rtx x, rtx y)
4760 enum insn_code icode;
4762 gcc_assert (GET_MODE (x) != VOIDmode);
4764 icode = optab_handler (sub_optab, GET_MODE (x));
4766 if (icode == CODE_FOR_nothing)
4767 return 0;
4769 if (!insn_operand_matches (icode, 0, x)
4770 || !insn_operand_matches (icode, 1, x)
4771 || !insn_operand_matches (icode, 2, y))
4772 return 0;
4774 return 1;
4777 /* Generate the body of an instruction to copy Y into X.
4778 It may be a list of insns, if one insn isn't enough. */
4781 gen_move_insn (rtx x, rtx y)
4783 rtx seq;
4785 start_sequence ();
4786 emit_move_insn_1 (x, y);
4787 seq = get_insns ();
4788 end_sequence ();
4789 return seq;
4792 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4793 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4794 no such operation exists, CODE_FOR_nothing will be returned. */
4796 enum insn_code
4797 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4798 int unsignedp)
4800 convert_optab tab;
4801 #ifdef HAVE_ptr_extend
4802 if (unsignedp < 0)
4803 return CODE_FOR_ptr_extend;
4804 #endif
4806 tab = unsignedp ? zext_optab : sext_optab;
4807 return convert_optab_handler (tab, to_mode, from_mode);
4810 /* Generate the body of an insn to extend Y (with mode MFROM)
4811 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4814 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4815 enum machine_mode mfrom, int unsignedp)
4817 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4818 return GEN_FCN (icode) (x, y);
4821 /* can_fix_p and can_float_p say whether the target machine
4822 can directly convert a given fixed point type to
4823 a given floating point type, or vice versa.
4824 The returned value is the CODE_FOR_... value to use,
4825 or CODE_FOR_nothing if these modes cannot be directly converted.
4827 *TRUNCP_PTR is set to 1 if it is necessary to output
4828 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4830 static enum insn_code
4831 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4832 int unsignedp, int *truncp_ptr)
4834 convert_optab tab;
4835 enum insn_code icode;
4837 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4838 icode = convert_optab_handler (tab, fixmode, fltmode);
4839 if (icode != CODE_FOR_nothing)
4841 *truncp_ptr = 0;
4842 return icode;
4845 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4846 for this to work. We need to rework the fix* and ftrunc* patterns
4847 and documentation. */
4848 tab = unsignedp ? ufix_optab : sfix_optab;
4849 icode = convert_optab_handler (tab, fixmode, fltmode);
4850 if (icode != CODE_FOR_nothing
4851 && optab_handler (ftrunc_optab, fltmode) != CODE_FOR_nothing)
4853 *truncp_ptr = 1;
4854 return icode;
4857 *truncp_ptr = 0;
4858 return CODE_FOR_nothing;
4861 enum insn_code
4862 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4863 int unsignedp)
4865 convert_optab tab;
4867 tab = unsignedp ? ufloat_optab : sfloat_optab;
4868 return convert_optab_handler (tab, fltmode, fixmode);
4871 /* Function supportable_convert_operation
4873 Check whether an operation represented by the code CODE is a
4874 convert operation that is supported by the target platform in
4875 vector form (i.e., when operating on arguments of type VECTYPE_IN
4876 producing a result of type VECTYPE_OUT).
4878 Convert operations we currently support directly are FIX_TRUNC and FLOAT.
4879 This function checks if these operations are supported
4880 by the target platform either directly (via vector tree-codes), or via
4881 target builtins.
4883 Output:
4884 - CODE1 is code of vector operation to be used when
4885 vectorizing the operation, if available.
4886 - DECL is decl of target builtin functions to be used
4887 when vectorizing the operation, if available. In this case,
4888 CODE1 is CALL_EXPR. */
4890 bool
4891 supportable_convert_operation (enum tree_code code,
4892 tree vectype_out, tree vectype_in,
4893 tree *decl, enum tree_code *code1)
4895 enum machine_mode m1,m2;
4896 int truncp;
4898 m1 = TYPE_MODE (vectype_out);
4899 m2 = TYPE_MODE (vectype_in);
4901 /* First check if we can done conversion directly. */
4902 if ((code == FIX_TRUNC_EXPR
4903 && can_fix_p (m1,m2,TYPE_UNSIGNED (vectype_out), &truncp)
4904 != CODE_FOR_nothing)
4905 || (code == FLOAT_EXPR
4906 && can_float_p (m1,m2,TYPE_UNSIGNED (vectype_in))
4907 != CODE_FOR_nothing))
4909 *code1 = code;
4910 return true;
4913 /* Now check for builtin. */
4914 if (targetm.vectorize.builtin_conversion
4915 && targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
4917 *code1 = CALL_EXPR;
4918 *decl = targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in);
4919 return true;
4921 return false;
4925 /* Generate code to convert FROM to floating point
4926 and store in TO. FROM must be fixed point and not VOIDmode.
4927 UNSIGNEDP nonzero means regard FROM as unsigned.
4928 Normally this is done by correcting the final value
4929 if it is negative. */
4931 void
4932 expand_float (rtx to, rtx from, int unsignedp)
4934 enum insn_code icode;
4935 rtx target = to;
4936 enum machine_mode fmode, imode;
4937 bool can_do_signed = false;
4939 /* Crash now, because we won't be able to decide which mode to use. */
4940 gcc_assert (GET_MODE (from) != VOIDmode);
4942 /* Look for an insn to do the conversion. Do it in the specified
4943 modes if possible; otherwise convert either input, output or both to
4944 wider mode. If the integer mode is wider than the mode of FROM,
4945 we can do the conversion signed even if the input is unsigned. */
4947 for (fmode = GET_MODE (to); fmode != VOIDmode;
4948 fmode = GET_MODE_WIDER_MODE (fmode))
4949 for (imode = GET_MODE (from); imode != VOIDmode;
4950 imode = GET_MODE_WIDER_MODE (imode))
4952 int doing_unsigned = unsignedp;
4954 if (fmode != GET_MODE (to)
4955 && significand_size (fmode) < GET_MODE_PRECISION (GET_MODE (from)))
4956 continue;
4958 icode = can_float_p (fmode, imode, unsignedp);
4959 if (icode == CODE_FOR_nothing && unsignedp)
4961 enum insn_code scode = can_float_p (fmode, imode, 0);
4962 if (scode != CODE_FOR_nothing)
4963 can_do_signed = true;
4964 if (imode != GET_MODE (from))
4965 icode = scode, doing_unsigned = 0;
4968 if (icode != CODE_FOR_nothing)
4970 if (imode != GET_MODE (from))
4971 from = convert_to_mode (imode, from, unsignedp);
4973 if (fmode != GET_MODE (to))
4974 target = gen_reg_rtx (fmode);
4976 emit_unop_insn (icode, target, from,
4977 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4979 if (target != to)
4980 convert_move (to, target, 0);
4981 return;
4985 /* Unsigned integer, and no way to convert directly. Convert as signed,
4986 then unconditionally adjust the result. */
4987 if (unsignedp && can_do_signed)
4989 rtx label = gen_label_rtx ();
4990 rtx temp;
4991 REAL_VALUE_TYPE offset;
4993 /* Look for a usable floating mode FMODE wider than the source and at
4994 least as wide as the target. Using FMODE will avoid rounding woes
4995 with unsigned values greater than the signed maximum value. */
4997 for (fmode = GET_MODE (to); fmode != VOIDmode;
4998 fmode = GET_MODE_WIDER_MODE (fmode))
4999 if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
5000 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
5001 break;
5003 if (fmode == VOIDmode)
5005 /* There is no such mode. Pretend the target is wide enough. */
5006 fmode = GET_MODE (to);
5008 /* Avoid double-rounding when TO is narrower than FROM. */
5009 if ((significand_size (fmode) + 1)
5010 < GET_MODE_PRECISION (GET_MODE (from)))
5012 rtx temp1;
5013 rtx neglabel = gen_label_rtx ();
5015 /* Don't use TARGET if it isn't a register, is a hard register,
5016 or is the wrong mode. */
5017 if (!REG_P (target)
5018 || REGNO (target) < FIRST_PSEUDO_REGISTER
5019 || GET_MODE (target) != fmode)
5020 target = gen_reg_rtx (fmode);
5022 imode = GET_MODE (from);
5023 do_pending_stack_adjust ();
5025 /* Test whether the sign bit is set. */
5026 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
5027 0, neglabel);
5029 /* The sign bit is not set. Convert as signed. */
5030 expand_float (target, from, 0);
5031 emit_jump_insn (gen_jump (label));
5032 emit_barrier ();
5034 /* The sign bit is set.
5035 Convert to a usable (positive signed) value by shifting right
5036 one bit, while remembering if a nonzero bit was shifted
5037 out; i.e., compute (from & 1) | (from >> 1). */
5039 emit_label (neglabel);
5040 temp = expand_binop (imode, and_optab, from, const1_rtx,
5041 NULL_RTX, 1, OPTAB_LIB_WIDEN);
5042 temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
5043 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
5044 OPTAB_LIB_WIDEN);
5045 expand_float (target, temp, 0);
5047 /* Multiply by 2 to undo the shift above. */
5048 temp = expand_binop (fmode, add_optab, target, target,
5049 target, 0, OPTAB_LIB_WIDEN);
5050 if (temp != target)
5051 emit_move_insn (target, temp);
5053 do_pending_stack_adjust ();
5054 emit_label (label);
5055 goto done;
5059 /* If we are about to do some arithmetic to correct for an
5060 unsigned operand, do it in a pseudo-register. */
5062 if (GET_MODE (to) != fmode
5063 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
5064 target = gen_reg_rtx (fmode);
5066 /* Convert as signed integer to floating. */
5067 expand_float (target, from, 0);
5069 /* If FROM is negative (and therefore TO is negative),
5070 correct its value by 2**bitwidth. */
5072 do_pending_stack_adjust ();
5073 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
5074 0, label);
5077 real_2expN (&offset, GET_MODE_PRECISION (GET_MODE (from)), fmode);
5078 temp = expand_binop (fmode, add_optab, target,
5079 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
5080 target, 0, OPTAB_LIB_WIDEN);
5081 if (temp != target)
5082 emit_move_insn (target, temp);
5084 do_pending_stack_adjust ();
5085 emit_label (label);
5086 goto done;
5089 /* No hardware instruction available; call a library routine. */
5091 rtx libfunc;
5092 rtx insns;
5093 rtx value;
5094 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
5096 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
5097 from = convert_to_mode (SImode, from, unsignedp);
5099 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5100 gcc_assert (libfunc);
5102 start_sequence ();
5104 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5105 GET_MODE (to), 1, from,
5106 GET_MODE (from));
5107 insns = get_insns ();
5108 end_sequence ();
5110 emit_libcall_block (insns, target, value,
5111 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
5112 GET_MODE (to), from));
5115 done:
5117 /* Copy result to requested destination
5118 if we have been computing in a temp location. */
5120 if (target != to)
5122 if (GET_MODE (target) == GET_MODE (to))
5123 emit_move_insn (to, target);
5124 else
5125 convert_move (to, target, 0);
5129 /* Generate code to convert FROM to fixed point and store in TO. FROM
5130 must be floating point. */
5132 void
5133 expand_fix (rtx to, rtx from, int unsignedp)
5135 enum insn_code icode;
5136 rtx target = to;
5137 enum machine_mode fmode, imode;
5138 int must_trunc = 0;
5140 /* We first try to find a pair of modes, one real and one integer, at
5141 least as wide as FROM and TO, respectively, in which we can open-code
5142 this conversion. If the integer mode is wider than the mode of TO,
5143 we can do the conversion either signed or unsigned. */
5145 for (fmode = GET_MODE (from); fmode != VOIDmode;
5146 fmode = GET_MODE_WIDER_MODE (fmode))
5147 for (imode = GET_MODE (to); imode != VOIDmode;
5148 imode = GET_MODE_WIDER_MODE (imode))
5150 int doing_unsigned = unsignedp;
5152 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
5153 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5154 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5156 if (icode != CODE_FOR_nothing)
5158 rtx last = get_last_insn ();
5159 if (fmode != GET_MODE (from))
5160 from = convert_to_mode (fmode, from, 0);
5162 if (must_trunc)
5164 rtx temp = gen_reg_rtx (GET_MODE (from));
5165 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
5166 temp, 0);
5169 if (imode != GET_MODE (to))
5170 target = gen_reg_rtx (imode);
5172 if (maybe_emit_unop_insn (icode, target, from,
5173 doing_unsigned ? UNSIGNED_FIX : FIX))
5175 if (target != to)
5176 convert_move (to, target, unsignedp);
5177 return;
5179 delete_insns_since (last);
5183 /* For an unsigned conversion, there is one more way to do it.
5184 If we have a signed conversion, we generate code that compares
5185 the real value to the largest representable positive number. If if
5186 is smaller, the conversion is done normally. Otherwise, subtract
5187 one plus the highest signed number, convert, and add it back.
5189 We only need to check all real modes, since we know we didn't find
5190 anything with a wider integer mode.
5192 This code used to extend FP value into mode wider than the destination.
5193 This is needed for decimal float modes which cannot accurately
5194 represent one plus the highest signed number of the same size, but
5195 not for binary modes. Consider, for instance conversion from SFmode
5196 into DImode.
5198 The hot path through the code is dealing with inputs smaller than 2^63
5199 and doing just the conversion, so there is no bits to lose.
5201 In the other path we know the value is positive in the range 2^63..2^64-1
5202 inclusive. (as for other input overflow happens and result is undefined)
5203 So we know that the most important bit set in mantissa corresponds to
5204 2^63. The subtraction of 2^63 should not generate any rounding as it
5205 simply clears out that bit. The rest is trivial. */
5207 if (unsignedp && GET_MODE_PRECISION (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
5208 for (fmode = GET_MODE (from); fmode != VOIDmode;
5209 fmode = GET_MODE_WIDER_MODE (fmode))
5210 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
5211 && (!DECIMAL_FLOAT_MODE_P (fmode)
5212 || GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (GET_MODE (to))))
5214 int bitsize;
5215 REAL_VALUE_TYPE offset;
5216 rtx limit, lab1, lab2, insn;
5218 bitsize = GET_MODE_PRECISION (GET_MODE (to));
5219 real_2expN (&offset, bitsize - 1, fmode);
5220 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
5221 lab1 = gen_label_rtx ();
5222 lab2 = gen_label_rtx ();
5224 if (fmode != GET_MODE (from))
5225 from = convert_to_mode (fmode, from, 0);
5227 /* See if we need to do the subtraction. */
5228 do_pending_stack_adjust ();
5229 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5230 0, lab1);
5232 /* If not, do the signed "fix" and branch around fixup code. */
5233 expand_fix (to, from, 0);
5234 emit_jump_insn (gen_jump (lab2));
5235 emit_barrier ();
5237 /* Otherwise, subtract 2**(N-1), convert to signed number,
5238 then add 2**(N-1). Do the addition using XOR since this
5239 will often generate better code. */
5240 emit_label (lab1);
5241 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5242 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5243 expand_fix (to, target, 0);
5244 target = expand_binop (GET_MODE (to), xor_optab, to,
5245 gen_int_mode
5246 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5247 GET_MODE (to)),
5248 to, 1, OPTAB_LIB_WIDEN);
5250 if (target != to)
5251 emit_move_insn (to, target);
5253 emit_label (lab2);
5255 if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing)
5257 /* Make a place for a REG_NOTE and add it. */
5258 insn = emit_move_insn (to, to);
5259 set_dst_reg_note (insn, REG_EQUAL,
5260 gen_rtx_fmt_e (UNSIGNED_FIX, GET_MODE (to),
5261 copy_rtx (from)),
5262 to);
5265 return;
5268 /* We can't do it with an insn, so use a library call. But first ensure
5269 that the mode of TO is at least as wide as SImode, since those are the
5270 only library calls we know about. */
5272 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5274 target = gen_reg_rtx (SImode);
5276 expand_fix (target, from, unsignedp);
5278 else
5280 rtx insns;
5281 rtx value;
5282 rtx libfunc;
5284 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5285 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5286 gcc_assert (libfunc);
5288 start_sequence ();
5290 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5291 GET_MODE (to), 1, from,
5292 GET_MODE (from));
5293 insns = get_insns ();
5294 end_sequence ();
5296 emit_libcall_block (insns, target, value,
5297 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5298 GET_MODE (to), from));
5301 if (target != to)
5303 if (GET_MODE (to) == GET_MODE (target))
5304 emit_move_insn (to, target);
5305 else
5306 convert_move (to, target, 0);
5310 /* Generate code to convert FROM or TO a fixed-point.
5311 If UINTP is true, either TO or FROM is an unsigned integer.
5312 If SATP is true, we need to saturate the result. */
5314 void
5315 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5317 enum machine_mode to_mode = GET_MODE (to);
5318 enum machine_mode from_mode = GET_MODE (from);
5319 convert_optab tab;
5320 enum rtx_code this_code;
5321 enum insn_code code;
5322 rtx insns, value;
5323 rtx libfunc;
5325 if (to_mode == from_mode)
5327 emit_move_insn (to, from);
5328 return;
5331 if (uintp)
5333 tab = satp ? satfractuns_optab : fractuns_optab;
5334 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5336 else
5338 tab = satp ? satfract_optab : fract_optab;
5339 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5341 code = convert_optab_handler (tab, to_mode, from_mode);
5342 if (code != CODE_FOR_nothing)
5344 emit_unop_insn (code, to, from, this_code);
5345 return;
5348 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5349 gcc_assert (libfunc);
5351 start_sequence ();
5352 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5353 1, from, from_mode);
5354 insns = get_insns ();
5355 end_sequence ();
5357 emit_libcall_block (insns, to, value,
5358 gen_rtx_fmt_e (tab->code, to_mode, from));
5361 /* Generate code to convert FROM to fixed point and store in TO. FROM
5362 must be floating point, TO must be signed. Use the conversion optab
5363 TAB to do the conversion. */
5365 bool
5366 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5368 enum insn_code icode;
5369 rtx target = to;
5370 enum machine_mode fmode, imode;
5372 /* We first try to find a pair of modes, one real and one integer, at
5373 least as wide as FROM and TO, respectively, in which we can open-code
5374 this conversion. If the integer mode is wider than the mode of TO,
5375 we can do the conversion either signed or unsigned. */
5377 for (fmode = GET_MODE (from); fmode != VOIDmode;
5378 fmode = GET_MODE_WIDER_MODE (fmode))
5379 for (imode = GET_MODE (to); imode != VOIDmode;
5380 imode = GET_MODE_WIDER_MODE (imode))
5382 icode = convert_optab_handler (tab, imode, fmode);
5383 if (icode != CODE_FOR_nothing)
5385 rtx last = get_last_insn ();
5386 if (fmode != GET_MODE (from))
5387 from = convert_to_mode (fmode, from, 0);
5389 if (imode != GET_MODE (to))
5390 target = gen_reg_rtx (imode);
5392 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5394 delete_insns_since (last);
5395 continue;
5397 if (target != to)
5398 convert_move (to, target, 0);
5399 return true;
5403 return false;
5406 /* Report whether we have an instruction to perform the operation
5407 specified by CODE on operands of mode MODE. */
5409 have_insn_for (enum rtx_code code, enum machine_mode mode)
5411 return (code_to_optab[(int) code] != 0
5412 && (optab_handler (code_to_optab[(int) code], mode)
5413 != CODE_FOR_nothing));
5416 /* Set all insn_code fields to CODE_FOR_nothing. */
5418 static void
5419 init_insn_codes (void)
5421 memset (optab_table, 0, sizeof (optab_table));
5422 memset (convert_optab_table, 0, sizeof (convert_optab_table));
5423 memset (direct_optab_table, 0, sizeof (direct_optab_table));
5426 /* Initialize OP's code to CODE, and write it into the code_to_optab table. */
5427 static inline void
5428 init_optab (optab op, enum rtx_code code)
5430 op->code = code;
5431 code_to_optab[(int) code] = op;
5434 /* Same, but fill in its code as CODE, and do _not_ write it into
5435 the code_to_optab table. */
5436 static inline void
5437 init_optabv (optab op, enum rtx_code code)
5439 op->code = code;
5442 /* Conversion optabs never go in the code_to_optab table. */
5443 static void
5444 init_convert_optab (convert_optab op, enum rtx_code code)
5446 op->code = code;
5449 /* Initialize the libfunc fields of an entire group of entries in some
5450 optab. Each entry is set equal to a string consisting of a leading
5451 pair of underscores followed by a generic operation name followed by
5452 a mode name (downshifted to lowercase) followed by a single character
5453 representing the number of operands for the given operation (which is
5454 usually one of the characters '2', '3', or '4').
5456 OPTABLE is the table in which libfunc fields are to be initialized.
5457 OPNAME is the generic (string) name of the operation.
5458 SUFFIX is the character which specifies the number of operands for
5459 the given generic operation.
5460 MODE is the mode to generate for.
5463 static void
5464 gen_libfunc (optab optable, const char *opname, int suffix, enum machine_mode mode)
5466 unsigned opname_len = strlen (opname);
5467 const char *mname = GET_MODE_NAME (mode);
5468 unsigned mname_len = strlen (mname);
5469 int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
5470 int len = prefix_len + opname_len + mname_len + 1 + 1;
5471 char *libfunc_name = XALLOCAVEC (char, len);
5472 char *p;
5473 const char *q;
5475 p = libfunc_name;
5476 *p++ = '_';
5477 *p++ = '_';
5478 if (targetm.libfunc_gnu_prefix)
5480 *p++ = 'g';
5481 *p++ = 'n';
5482 *p++ = 'u';
5483 *p++ = '_';
5485 for (q = opname; *q; )
5486 *p++ = *q++;
5487 for (q = mname; *q; q++)
5488 *p++ = TOLOWER (*q);
5489 *p++ = suffix;
5490 *p = '\0';
5492 set_optab_libfunc (optable, mode,
5493 ggc_alloc_string (libfunc_name, p - libfunc_name));
5496 /* Like gen_libfunc, but verify that integer operation is involved. */
5498 static void
5499 gen_int_libfunc (optab optable, const char *opname, char suffix,
5500 enum machine_mode mode)
5502 int maxsize = 2 * BITS_PER_WORD;
5504 if (GET_MODE_CLASS (mode) != MODE_INT)
5505 return;
5506 if (maxsize < LONG_LONG_TYPE_SIZE)
5507 maxsize = LONG_LONG_TYPE_SIZE;
5508 if (GET_MODE_CLASS (mode) != MODE_INT
5509 || mode < word_mode || GET_MODE_BITSIZE (mode) > maxsize)
5510 return;
5511 gen_libfunc (optable, opname, suffix, mode);
5514 /* Like gen_libfunc, but verify that FP and set decimal prefix if needed. */
5516 static void
5517 gen_fp_libfunc (optab optable, const char *opname, char suffix,
5518 enum machine_mode mode)
5520 char *dec_opname;
5522 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5523 gen_libfunc (optable, opname, suffix, mode);
5524 if (DECIMAL_FLOAT_MODE_P (mode))
5526 dec_opname = XALLOCAVEC (char, sizeof (DECIMAL_PREFIX) + strlen (opname));
5527 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5528 depending on the low level floating format used. */
5529 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5530 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5531 gen_libfunc (optable, dec_opname, suffix, mode);
5535 /* Like gen_libfunc, but verify that fixed-point operation is involved. */
5537 static void
5538 gen_fixed_libfunc (optab optable, const char *opname, char suffix,
5539 enum machine_mode mode)
5541 if (!ALL_FIXED_POINT_MODE_P (mode))
5542 return;
5543 gen_libfunc (optable, opname, suffix, mode);
5546 /* Like gen_libfunc, but verify that signed fixed-point operation is
5547 involved. */
5549 static void
5550 gen_signed_fixed_libfunc (optab optable, const char *opname, char suffix,
5551 enum machine_mode mode)
5553 if (!SIGNED_FIXED_POINT_MODE_P (mode))
5554 return;
5555 gen_libfunc (optable, opname, suffix, mode);
5558 /* Like gen_libfunc, but verify that unsigned fixed-point operation is
5559 involved. */
5561 static void
5562 gen_unsigned_fixed_libfunc (optab optable, const char *opname, char suffix,
5563 enum machine_mode mode)
5565 if (!UNSIGNED_FIXED_POINT_MODE_P (mode))
5566 return;
5567 gen_libfunc (optable, opname, suffix, mode);
5570 /* Like gen_libfunc, but verify that FP or INT operation is involved. */
5572 static void
5573 gen_int_fp_libfunc (optab optable, const char *name, char suffix,
5574 enum machine_mode mode)
5576 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5577 gen_fp_libfunc (optable, name, suffix, mode);
5578 if (INTEGRAL_MODE_P (mode))
5579 gen_int_libfunc (optable, name, suffix, mode);
5582 /* Like gen_libfunc, but verify that FP or INT operation is involved
5583 and add 'v' suffix for integer operation. */
5585 static void
5586 gen_intv_fp_libfunc (optab optable, const char *name, char suffix,
5587 enum machine_mode mode)
5589 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5590 gen_fp_libfunc (optable, name, suffix, mode);
5591 if (GET_MODE_CLASS (mode) == MODE_INT)
5593 int len = strlen (name);
5594 char *v_name = XALLOCAVEC (char, len + 2);
5595 strcpy (v_name, name);
5596 v_name[len] = 'v';
5597 v_name[len + 1] = 0;
5598 gen_int_libfunc (optable, v_name, suffix, mode);
5602 /* Like gen_libfunc, but verify that FP or INT or FIXED operation is
5603 involved. */
5605 static void
5606 gen_int_fp_fixed_libfunc (optab optable, const char *name, char suffix,
5607 enum machine_mode mode)
5609 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5610 gen_fp_libfunc (optable, name, suffix, mode);
5611 if (INTEGRAL_MODE_P (mode))
5612 gen_int_libfunc (optable, name, suffix, mode);
5613 if (ALL_FIXED_POINT_MODE_P (mode))
5614 gen_fixed_libfunc (optable, name, suffix, mode);
5617 /* Like gen_libfunc, but verify that FP or INT or signed FIXED operation is
5618 involved. */
5620 static void
5621 gen_int_fp_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5622 enum machine_mode mode)
5624 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5625 gen_fp_libfunc (optable, name, suffix, mode);
5626 if (INTEGRAL_MODE_P (mode))
5627 gen_int_libfunc (optable, name, suffix, mode);
5628 if (SIGNED_FIXED_POINT_MODE_P (mode))
5629 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5632 /* Like gen_libfunc, but verify that INT or FIXED operation is
5633 involved. */
5635 static void
5636 gen_int_fixed_libfunc (optab optable, const char *name, char suffix,
5637 enum machine_mode mode)
5639 if (INTEGRAL_MODE_P (mode))
5640 gen_int_libfunc (optable, name, suffix, mode);
5641 if (ALL_FIXED_POINT_MODE_P (mode))
5642 gen_fixed_libfunc (optable, name, suffix, mode);
5645 /* Like gen_libfunc, but verify that INT or signed FIXED operation is
5646 involved. */
5648 static void
5649 gen_int_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5650 enum machine_mode mode)
5652 if (INTEGRAL_MODE_P (mode))
5653 gen_int_libfunc (optable, name, suffix, mode);
5654 if (SIGNED_FIXED_POINT_MODE_P (mode))
5655 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5658 /* Like gen_libfunc, but verify that INT or unsigned FIXED operation is
5659 involved. */
5661 static void
5662 gen_int_unsigned_fixed_libfunc (optab optable, const char *name, char suffix,
5663 enum machine_mode mode)
5665 if (INTEGRAL_MODE_P (mode))
5666 gen_int_libfunc (optable, name, suffix, mode);
5667 if (UNSIGNED_FIXED_POINT_MODE_P (mode))
5668 gen_unsigned_fixed_libfunc (optable, name, suffix, mode);
5671 /* Initialize the libfunc fields of an entire group of entries of an
5672 inter-mode-class conversion optab. The string formation rules are
5673 similar to the ones for init_libfuncs, above, but instead of having
5674 a mode name and an operand count these functions have two mode names
5675 and no operand count. */
5677 static void
5678 gen_interclass_conv_libfunc (convert_optab tab,
5679 const char *opname,
5680 enum machine_mode tmode,
5681 enum machine_mode fmode)
5683 size_t opname_len = strlen (opname);
5684 size_t mname_len = 0;
5686 const char *fname, *tname;
5687 const char *q;
5688 int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
5689 char *libfunc_name, *suffix;
5690 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5691 char *p;
5693 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5694 depends on which underlying decimal floating point format is used. */
5695 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5697 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5699 nondec_name = XALLOCAVEC (char, prefix_len + opname_len + mname_len + 1 + 1);
5700 nondec_name[0] = '_';
5701 nondec_name[1] = '_';
5702 if (targetm.libfunc_gnu_prefix)
5704 nondec_name[2] = 'g';
5705 nondec_name[3] = 'n';
5706 nondec_name[4] = 'u';
5707 nondec_name[5] = '_';
5710 memcpy (&nondec_name[prefix_len], opname, opname_len);
5711 nondec_suffix = nondec_name + opname_len + prefix_len;
5713 dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
5714 dec_name[0] = '_';
5715 dec_name[1] = '_';
5716 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5717 memcpy (&dec_name[2+dec_len], opname, opname_len);
5718 dec_suffix = dec_name + dec_len + opname_len + 2;
5720 fname = GET_MODE_NAME (fmode);
5721 tname = GET_MODE_NAME (tmode);
5723 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5725 libfunc_name = dec_name;
5726 suffix = dec_suffix;
5728 else
5730 libfunc_name = nondec_name;
5731 suffix = nondec_suffix;
5734 p = suffix;
5735 for (q = fname; *q; p++, q++)
5736 *p = TOLOWER (*q);
5737 for (q = tname; *q; p++, q++)
5738 *p = TOLOWER (*q);
5740 *p = '\0';
5742 set_conv_libfunc (tab, tmode, fmode,
5743 ggc_alloc_string (libfunc_name, p - libfunc_name));
5746 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5747 int->fp conversion. */
5749 static void
5750 gen_int_to_fp_conv_libfunc (convert_optab tab,
5751 const char *opname,
5752 enum machine_mode tmode,
5753 enum machine_mode fmode)
5755 if (GET_MODE_CLASS (fmode) != MODE_INT)
5756 return;
5757 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5758 return;
5759 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5762 /* ufloat_optab is special by using floatun for FP and floatuns decimal fp
5763 naming scheme. */
5765 static void
5766 gen_ufloat_conv_libfunc (convert_optab tab,
5767 const char *opname ATTRIBUTE_UNUSED,
5768 enum machine_mode tmode,
5769 enum machine_mode fmode)
5771 if (DECIMAL_FLOAT_MODE_P (tmode))
5772 gen_int_to_fp_conv_libfunc (tab, "floatuns", tmode, fmode);
5773 else
5774 gen_int_to_fp_conv_libfunc (tab, "floatun", tmode, fmode);
5777 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5778 fp->int conversion. */
5780 static void
5781 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab,
5782 const char *opname,
5783 enum machine_mode tmode,
5784 enum machine_mode fmode)
5786 if (GET_MODE_CLASS (fmode) != MODE_INT)
5787 return;
5788 if (GET_MODE_CLASS (tmode) != MODE_FLOAT)
5789 return;
5790 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5793 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5794 fp->int conversion with no decimal floating point involved. */
5796 static void
5797 gen_fp_to_int_conv_libfunc (convert_optab tab,
5798 const char *opname,
5799 enum machine_mode tmode,
5800 enum machine_mode fmode)
5802 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5803 return;
5804 if (GET_MODE_CLASS (tmode) != MODE_INT)
5805 return;
5806 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5809 /* Initialize the libfunc fields of an of an intra-mode-class conversion optab.
5810 The string formation rules are
5811 similar to the ones for init_libfunc, above. */
5813 static void
5814 gen_intraclass_conv_libfunc (convert_optab tab, const char *opname,
5815 enum machine_mode tmode, enum machine_mode fmode)
5817 size_t opname_len = strlen (opname);
5818 size_t mname_len = 0;
5820 const char *fname, *tname;
5821 const char *q;
5822 int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
5823 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5824 char *libfunc_name, *suffix;
5825 char *p;
5827 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5828 depends on which underlying decimal floating point format is used. */
5829 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5831 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5833 nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
5834 nondec_name[0] = '_';
5835 nondec_name[1] = '_';
5836 if (targetm.libfunc_gnu_prefix)
5838 nondec_name[2] = 'g';
5839 nondec_name[3] = 'n';
5840 nondec_name[4] = 'u';
5841 nondec_name[5] = '_';
5843 memcpy (&nondec_name[prefix_len], opname, opname_len);
5844 nondec_suffix = nondec_name + opname_len + prefix_len;
5846 dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
5847 dec_name[0] = '_';
5848 dec_name[1] = '_';
5849 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5850 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5851 dec_suffix = dec_name + dec_len + opname_len + 2;
5853 fname = GET_MODE_NAME (fmode);
5854 tname = GET_MODE_NAME (tmode);
5856 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5858 libfunc_name = dec_name;
5859 suffix = dec_suffix;
5861 else
5863 libfunc_name = nondec_name;
5864 suffix = nondec_suffix;
5867 p = suffix;
5868 for (q = fname; *q; p++, q++)
5869 *p = TOLOWER (*q);
5870 for (q = tname; *q; p++, q++)
5871 *p = TOLOWER (*q);
5873 *p++ = '2';
5874 *p = '\0';
5876 set_conv_libfunc (tab, tmode, fmode,
5877 ggc_alloc_string (libfunc_name, p - libfunc_name));
5880 /* Pick proper libcall for trunc_optab. We need to chose if we do
5881 truncation or extension and interclass or intraclass. */
5883 static void
5884 gen_trunc_conv_libfunc (convert_optab tab,
5885 const char *opname,
5886 enum machine_mode tmode,
5887 enum machine_mode fmode)
5889 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5890 return;
5891 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5892 return;
5893 if (tmode == fmode)
5894 return;
5896 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5897 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5898 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5900 if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
5901 return;
5903 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5904 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5905 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5906 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5909 /* Pick proper libcall for extend_optab. We need to chose if we do
5910 truncation or extension and interclass or intraclass. */
5912 static void
5913 gen_extend_conv_libfunc (convert_optab tab,
5914 const char *opname ATTRIBUTE_UNUSED,
5915 enum machine_mode tmode,
5916 enum machine_mode fmode)
5918 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5919 return;
5920 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5921 return;
5922 if (tmode == fmode)
5923 return;
5925 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5926 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5927 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5929 if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
5930 return;
5932 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5933 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5934 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5935 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5938 /* Pick proper libcall for fract_optab. We need to chose if we do
5939 interclass or intraclass. */
5941 static void
5942 gen_fract_conv_libfunc (convert_optab tab,
5943 const char *opname,
5944 enum machine_mode tmode,
5945 enum machine_mode fmode)
5947 if (tmode == fmode)
5948 return;
5949 if (!(ALL_FIXED_POINT_MODE_P (tmode) || ALL_FIXED_POINT_MODE_P (fmode)))
5950 return;
5952 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
5953 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5954 else
5955 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5958 /* Pick proper libcall for fractuns_optab. */
5960 static void
5961 gen_fractuns_conv_libfunc (convert_optab tab,
5962 const char *opname,
5963 enum machine_mode tmode,
5964 enum machine_mode fmode)
5966 if (tmode == fmode)
5967 return;
5968 /* One mode must be a fixed-point mode, and the other must be an integer
5969 mode. */
5970 if (!((ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT)
5971 || (ALL_FIXED_POINT_MODE_P (fmode)
5972 && GET_MODE_CLASS (tmode) == MODE_INT)))
5973 return;
5975 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5978 /* Pick proper libcall for satfract_optab. We need to chose if we do
5979 interclass or intraclass. */
5981 static void
5982 gen_satfract_conv_libfunc (convert_optab tab,
5983 const char *opname,
5984 enum machine_mode tmode,
5985 enum machine_mode fmode)
5987 if (tmode == fmode)
5988 return;
5989 /* TMODE must be a fixed-point mode. */
5990 if (!ALL_FIXED_POINT_MODE_P (tmode))
5991 return;
5993 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
5994 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5995 else
5996 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5999 /* Pick proper libcall for satfractuns_optab. */
6001 static void
6002 gen_satfractuns_conv_libfunc (convert_optab tab,
6003 const char *opname,
6004 enum machine_mode tmode,
6005 enum machine_mode fmode)
6007 if (tmode == fmode)
6008 return;
6009 /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
6010 if (!(ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT))
6011 return;
6013 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6016 /* A table of previously-created libfuncs, hashed by name. */
6017 static GTY ((param_is (union tree_node))) htab_t libfunc_decls;
6019 /* Hashtable callbacks for libfunc_decls. */
6021 static hashval_t
6022 libfunc_decl_hash (const void *entry)
6024 return IDENTIFIER_HASH_VALUE (DECL_NAME ((const_tree) entry));
6027 static int
6028 libfunc_decl_eq (const void *entry1, const void *entry2)
6030 return DECL_NAME ((const_tree) entry1) == (const_tree) entry2;
6033 /* Build a decl for a libfunc named NAME. */
6035 tree
6036 build_libfunc_function (const char *name)
6038 tree decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
6039 get_identifier (name),
6040 build_function_type (integer_type_node, NULL_TREE));
6041 /* ??? We don't have any type information except for this is
6042 a function. Pretend this is "int foo()". */
6043 DECL_ARTIFICIAL (decl) = 1;
6044 DECL_EXTERNAL (decl) = 1;
6045 TREE_PUBLIC (decl) = 1;
6046 gcc_assert (DECL_ASSEMBLER_NAME (decl));
6048 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
6049 are the flags assigned by targetm.encode_section_info. */
6050 SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);
6052 return decl;
6056 init_one_libfunc (const char *name)
6058 tree id, decl;
6059 void **slot;
6060 hashval_t hash;
6062 if (libfunc_decls == NULL)
6063 libfunc_decls = htab_create_ggc (37, libfunc_decl_hash,
6064 libfunc_decl_eq, NULL);
6066 /* See if we have already created a libfunc decl for this function. */
6067 id = get_identifier (name);
6068 hash = IDENTIFIER_HASH_VALUE (id);
6069 slot = htab_find_slot_with_hash (libfunc_decls, id, hash, INSERT);
6070 decl = (tree) *slot;
6071 if (decl == NULL)
6073 /* Create a new decl, so that it can be passed to
6074 targetm.encode_section_info. */
6075 decl = build_libfunc_function (name);
6076 *slot = decl;
6078 return XEXP (DECL_RTL (decl), 0);
6081 /* Adjust the assembler name of libfunc NAME to ASMSPEC. */
6084 set_user_assembler_libfunc (const char *name, const char *asmspec)
6086 tree id, decl;
6087 void **slot;
6088 hashval_t hash;
6090 id = get_identifier (name);
6091 hash = IDENTIFIER_HASH_VALUE (id);
6092 slot = htab_find_slot_with_hash (libfunc_decls, id, hash, NO_INSERT);
6093 gcc_assert (slot);
6094 decl = (tree) *slot;
6095 set_user_assembler_name (decl, asmspec);
6096 return XEXP (DECL_RTL (decl), 0);
6099 /* Call this to reset the function entry for one optab (OPTABLE) in mode
6100 MODE to NAME, which should be either 0 or a string constant. */
6101 void
6102 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
6104 rtx val;
6105 struct libfunc_entry e;
6106 struct libfunc_entry **slot;
6107 e.optab = (size_t) (optable - &optab_table[0]);
6108 e.mode1 = mode;
6109 e.mode2 = VOIDmode;
6111 if (name)
6112 val = init_one_libfunc (name);
6113 else
6114 val = 0;
6115 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6116 if (*slot == NULL)
6117 *slot = ggc_alloc_libfunc_entry ();
6118 (*slot)->optab = (size_t) (optable - &optab_table[0]);
6119 (*slot)->mode1 = mode;
6120 (*slot)->mode2 = VOIDmode;
6121 (*slot)->libfunc = val;
6124 /* Call this to reset the function entry for one conversion optab
6125 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
6126 either 0 or a string constant. */
6127 void
6128 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
6129 enum machine_mode fmode, const char *name)
6131 rtx val;
6132 struct libfunc_entry e;
6133 struct libfunc_entry **slot;
6134 e.optab = (size_t) (optable - &convert_optab_table[0]);
6135 e.mode1 = tmode;
6136 e.mode2 = fmode;
6138 if (name)
6139 val = init_one_libfunc (name);
6140 else
6141 val = 0;
6142 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6143 if (*slot == NULL)
6144 *slot = ggc_alloc_libfunc_entry ();
6145 (*slot)->optab = (size_t) (optable - &convert_optab_table[0]);
6146 (*slot)->mode1 = tmode;
6147 (*slot)->mode2 = fmode;
6148 (*slot)->libfunc = val;
6151 /* Call this to initialize the contents of the optabs
6152 appropriately for the current target machine. */
6154 void
6155 init_optabs (void)
6157 if (libfunc_hash)
6159 htab_empty (libfunc_hash);
6160 /* We statically initialize the insn_codes with the equivalent of
6161 CODE_FOR_nothing. Repeat the process if reinitialising. */
6162 init_insn_codes ();
6164 else
6165 libfunc_hash = htab_create_ggc (10, hash_libfunc, eq_libfunc, NULL);
6167 init_optab (add_optab, PLUS);
6168 init_optabv (addv_optab, PLUS);
6169 init_optab (sub_optab, MINUS);
6170 init_optabv (subv_optab, MINUS);
6171 init_optab (ssadd_optab, SS_PLUS);
6172 init_optab (usadd_optab, US_PLUS);
6173 init_optab (sssub_optab, SS_MINUS);
6174 init_optab (ussub_optab, US_MINUS);
6175 init_optab (smul_optab, MULT);
6176 init_optab (ssmul_optab, SS_MULT);
6177 init_optab (usmul_optab, US_MULT);
6178 init_optabv (smulv_optab, MULT);
6179 init_optab (smul_highpart_optab, UNKNOWN);
6180 init_optab (umul_highpart_optab, UNKNOWN);
6181 init_optab (smul_widen_optab, UNKNOWN);
6182 init_optab (umul_widen_optab, UNKNOWN);
6183 init_optab (usmul_widen_optab, UNKNOWN);
6184 init_optab (smadd_widen_optab, UNKNOWN);
6185 init_optab (umadd_widen_optab, UNKNOWN);
6186 init_optab (ssmadd_widen_optab, UNKNOWN);
6187 init_optab (usmadd_widen_optab, UNKNOWN);
6188 init_optab (smsub_widen_optab, UNKNOWN);
6189 init_optab (umsub_widen_optab, UNKNOWN);
6190 init_optab (ssmsub_widen_optab, UNKNOWN);
6191 init_optab (usmsub_widen_optab, UNKNOWN);
6192 init_optab (sdiv_optab, DIV);
6193 init_optab (ssdiv_optab, SS_DIV);
6194 init_optab (usdiv_optab, US_DIV);
6195 init_optabv (sdivv_optab, DIV);
6196 init_optab (sdivmod_optab, UNKNOWN);
6197 init_optab (udiv_optab, UDIV);
6198 init_optab (udivmod_optab, UNKNOWN);
6199 init_optab (smod_optab, MOD);
6200 init_optab (umod_optab, UMOD);
6201 init_optab (fmod_optab, UNKNOWN);
6202 init_optab (remainder_optab, UNKNOWN);
6203 init_optab (ftrunc_optab, UNKNOWN);
6204 init_optab (and_optab, AND);
6205 init_optab (ior_optab, IOR);
6206 init_optab (xor_optab, XOR);
6207 init_optab (ashl_optab, ASHIFT);
6208 init_optab (ssashl_optab, SS_ASHIFT);
6209 init_optab (usashl_optab, US_ASHIFT);
6210 init_optab (ashr_optab, ASHIFTRT);
6211 init_optab (lshr_optab, LSHIFTRT);
6212 init_optabv (vashl_optab, ASHIFT);
6213 init_optabv (vashr_optab, ASHIFTRT);
6214 init_optabv (vlshr_optab, LSHIFTRT);
6215 init_optab (rotl_optab, ROTATE);
6216 init_optab (rotr_optab, ROTATERT);
6217 init_optab (smin_optab, SMIN);
6218 init_optab (smax_optab, SMAX);
6219 init_optab (umin_optab, UMIN);
6220 init_optab (umax_optab, UMAX);
6221 init_optab (pow_optab, UNKNOWN);
6222 init_optab (atan2_optab, UNKNOWN);
6223 init_optab (fma_optab, FMA);
6224 init_optab (fms_optab, UNKNOWN);
6225 init_optab (fnma_optab, UNKNOWN);
6226 init_optab (fnms_optab, UNKNOWN);
6228 /* These three have codes assigned exclusively for the sake of
6229 have_insn_for. */
6230 init_optab (mov_optab, SET);
6231 init_optab (movstrict_optab, STRICT_LOW_PART);
6232 init_optab (cbranch_optab, COMPARE);
6234 init_optab (cmov_optab, UNKNOWN);
6235 init_optab (cstore_optab, UNKNOWN);
6236 init_optab (ctrap_optab, UNKNOWN);
6238 init_optab (storent_optab, UNKNOWN);
6240 init_optab (cmp_optab, UNKNOWN);
6241 init_optab (ucmp_optab, UNKNOWN);
6243 init_optab (eq_optab, EQ);
6244 init_optab (ne_optab, NE);
6245 init_optab (gt_optab, GT);
6246 init_optab (ge_optab, GE);
6247 init_optab (lt_optab, LT);
6248 init_optab (le_optab, LE);
6249 init_optab (unord_optab, UNORDERED);
6251 init_optab (neg_optab, NEG);
6252 init_optab (ssneg_optab, SS_NEG);
6253 init_optab (usneg_optab, US_NEG);
6254 init_optabv (negv_optab, NEG);
6255 init_optab (abs_optab, ABS);
6256 init_optabv (absv_optab, ABS);
6257 init_optab (addcc_optab, UNKNOWN);
6258 init_optab (one_cmpl_optab, NOT);
6259 init_optab (bswap_optab, BSWAP);
6260 init_optab (ffs_optab, FFS);
6261 init_optab (clz_optab, CLZ);
6262 init_optab (ctz_optab, CTZ);
6263 init_optab (clrsb_optab, CLRSB);
6264 init_optab (popcount_optab, POPCOUNT);
6265 init_optab (parity_optab, PARITY);
6266 init_optab (sqrt_optab, SQRT);
6267 init_optab (floor_optab, UNKNOWN);
6268 init_optab (ceil_optab, UNKNOWN);
6269 init_optab (round_optab, UNKNOWN);
6270 init_optab (btrunc_optab, UNKNOWN);
6271 init_optab (nearbyint_optab, UNKNOWN);
6272 init_optab (rint_optab, UNKNOWN);
6273 init_optab (sincos_optab, UNKNOWN);
6274 init_optab (sin_optab, UNKNOWN);
6275 init_optab (asin_optab, UNKNOWN);
6276 init_optab (cos_optab, UNKNOWN);
6277 init_optab (acos_optab, UNKNOWN);
6278 init_optab (exp_optab, UNKNOWN);
6279 init_optab (exp10_optab, UNKNOWN);
6280 init_optab (exp2_optab, UNKNOWN);
6281 init_optab (expm1_optab, UNKNOWN);
6282 init_optab (ldexp_optab, UNKNOWN);
6283 init_optab (scalb_optab, UNKNOWN);
6284 init_optab (significand_optab, UNKNOWN);
6285 init_optab (logb_optab, UNKNOWN);
6286 init_optab (ilogb_optab, UNKNOWN);
6287 init_optab (log_optab, UNKNOWN);
6288 init_optab (log10_optab, UNKNOWN);
6289 init_optab (log2_optab, UNKNOWN);
6290 init_optab (log1p_optab, UNKNOWN);
6291 init_optab (tan_optab, UNKNOWN);
6292 init_optab (atan_optab, UNKNOWN);
6293 init_optab (copysign_optab, UNKNOWN);
6294 init_optab (signbit_optab, UNKNOWN);
6296 init_optab (isinf_optab, UNKNOWN);
6298 init_optab (strlen_optab, UNKNOWN);
6299 init_optab (push_optab, UNKNOWN);
6301 init_optab (reduc_smax_optab, UNKNOWN);
6302 init_optab (reduc_umax_optab, UNKNOWN);
6303 init_optab (reduc_smin_optab, UNKNOWN);
6304 init_optab (reduc_umin_optab, UNKNOWN);
6305 init_optab (reduc_splus_optab, UNKNOWN);
6306 init_optab (reduc_uplus_optab, UNKNOWN);
6308 init_optab (ssum_widen_optab, UNKNOWN);
6309 init_optab (usum_widen_optab, UNKNOWN);
6310 init_optab (sdot_prod_optab, UNKNOWN);
6311 init_optab (udot_prod_optab, UNKNOWN);
6313 init_optab (vec_extract_optab, UNKNOWN);
6314 init_optab (vec_set_optab, UNKNOWN);
6315 init_optab (vec_init_optab, UNKNOWN);
6316 init_optab (vec_shl_optab, UNKNOWN);
6317 init_optab (vec_shr_optab, UNKNOWN);
6318 init_optab (vec_realign_load_optab, UNKNOWN);
6319 init_optab (movmisalign_optab, UNKNOWN);
6320 init_optab (vec_widen_umult_hi_optab, UNKNOWN);
6321 init_optab (vec_widen_umult_lo_optab, UNKNOWN);
6322 init_optab (vec_widen_smult_hi_optab, UNKNOWN);
6323 init_optab (vec_widen_smult_lo_optab, UNKNOWN);
6324 init_optab (vec_widen_ushiftl_hi_optab, UNKNOWN);
6325 init_optab (vec_widen_ushiftl_lo_optab, UNKNOWN);
6326 init_optab (vec_widen_sshiftl_hi_optab, UNKNOWN);
6327 init_optab (vec_widen_sshiftl_lo_optab, UNKNOWN);
6328 init_optab (vec_unpacks_hi_optab, UNKNOWN);
6329 init_optab (vec_unpacks_lo_optab, UNKNOWN);
6330 init_optab (vec_unpacku_hi_optab, UNKNOWN);
6331 init_optab (vec_unpacku_lo_optab, UNKNOWN);
6332 init_optab (vec_unpacks_float_hi_optab, UNKNOWN);
6333 init_optab (vec_unpacks_float_lo_optab, UNKNOWN);
6334 init_optab (vec_unpacku_float_hi_optab, UNKNOWN);
6335 init_optab (vec_unpacku_float_lo_optab, UNKNOWN);
6336 init_optab (vec_pack_trunc_optab, UNKNOWN);
6337 init_optab (vec_pack_usat_optab, UNKNOWN);
6338 init_optab (vec_pack_ssat_optab, UNKNOWN);
6339 init_optab (vec_pack_ufix_trunc_optab, UNKNOWN);
6340 init_optab (vec_pack_sfix_trunc_optab, UNKNOWN);
6342 init_optab (powi_optab, UNKNOWN);
6344 /* Conversions. */
6345 init_convert_optab (sext_optab, SIGN_EXTEND);
6346 init_convert_optab (zext_optab, ZERO_EXTEND);
6347 init_convert_optab (trunc_optab, TRUNCATE);
6348 init_convert_optab (sfix_optab, FIX);
6349 init_convert_optab (ufix_optab, UNSIGNED_FIX);
6350 init_convert_optab (sfixtrunc_optab, UNKNOWN);
6351 init_convert_optab (ufixtrunc_optab, UNKNOWN);
6352 init_convert_optab (sfloat_optab, FLOAT);
6353 init_convert_optab (ufloat_optab, UNSIGNED_FLOAT);
6354 init_convert_optab (lrint_optab, UNKNOWN);
6355 init_convert_optab (lround_optab, UNKNOWN);
6356 init_convert_optab (lfloor_optab, UNKNOWN);
6357 init_convert_optab (lceil_optab, UNKNOWN);
6359 init_convert_optab (fract_optab, FRACT_CONVERT);
6360 init_convert_optab (fractuns_optab, UNSIGNED_FRACT_CONVERT);
6361 init_convert_optab (satfract_optab, SAT_FRACT);
6362 init_convert_optab (satfractuns_optab, UNSIGNED_SAT_FRACT);
6364 /* Fill in the optabs with the insns we support. */
6365 init_all_optabs ();
6367 /* Initialize the optabs with the names of the library functions. */
6368 add_optab->libcall_basename = "add";
6369 add_optab->libcall_suffix = '3';
6370 add_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6371 addv_optab->libcall_basename = "add";
6372 addv_optab->libcall_suffix = '3';
6373 addv_optab->libcall_gen = gen_intv_fp_libfunc;
6374 ssadd_optab->libcall_basename = "ssadd";
6375 ssadd_optab->libcall_suffix = '3';
6376 ssadd_optab->libcall_gen = gen_signed_fixed_libfunc;
6377 usadd_optab->libcall_basename = "usadd";
6378 usadd_optab->libcall_suffix = '3';
6379 usadd_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6380 sub_optab->libcall_basename = "sub";
6381 sub_optab->libcall_suffix = '3';
6382 sub_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6383 subv_optab->libcall_basename = "sub";
6384 subv_optab->libcall_suffix = '3';
6385 subv_optab->libcall_gen = gen_intv_fp_libfunc;
6386 sssub_optab->libcall_basename = "sssub";
6387 sssub_optab->libcall_suffix = '3';
6388 sssub_optab->libcall_gen = gen_signed_fixed_libfunc;
6389 ussub_optab->libcall_basename = "ussub";
6390 ussub_optab->libcall_suffix = '3';
6391 ussub_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6392 smul_optab->libcall_basename = "mul";
6393 smul_optab->libcall_suffix = '3';
6394 smul_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6395 smulv_optab->libcall_basename = "mul";
6396 smulv_optab->libcall_suffix = '3';
6397 smulv_optab->libcall_gen = gen_intv_fp_libfunc;
6398 ssmul_optab->libcall_basename = "ssmul";
6399 ssmul_optab->libcall_suffix = '3';
6400 ssmul_optab->libcall_gen = gen_signed_fixed_libfunc;
6401 usmul_optab->libcall_basename = "usmul";
6402 usmul_optab->libcall_suffix = '3';
6403 usmul_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6404 sdiv_optab->libcall_basename = "div";
6405 sdiv_optab->libcall_suffix = '3';
6406 sdiv_optab->libcall_gen = gen_int_fp_signed_fixed_libfunc;
6407 sdivv_optab->libcall_basename = "divv";
6408 sdivv_optab->libcall_suffix = '3';
6409 sdivv_optab->libcall_gen = gen_int_libfunc;
6410 ssdiv_optab->libcall_basename = "ssdiv";
6411 ssdiv_optab->libcall_suffix = '3';
6412 ssdiv_optab->libcall_gen = gen_signed_fixed_libfunc;
6413 udiv_optab->libcall_basename = "udiv";
6414 udiv_optab->libcall_suffix = '3';
6415 udiv_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6416 usdiv_optab->libcall_basename = "usdiv";
6417 usdiv_optab->libcall_suffix = '3';
6418 usdiv_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6419 sdivmod_optab->libcall_basename = "divmod";
6420 sdivmod_optab->libcall_suffix = '4';
6421 sdivmod_optab->libcall_gen = gen_int_libfunc;
6422 udivmod_optab->libcall_basename = "udivmod";
6423 udivmod_optab->libcall_suffix = '4';
6424 udivmod_optab->libcall_gen = gen_int_libfunc;
6425 smod_optab->libcall_basename = "mod";
6426 smod_optab->libcall_suffix = '3';
6427 smod_optab->libcall_gen = gen_int_libfunc;
6428 umod_optab->libcall_basename = "umod";
6429 umod_optab->libcall_suffix = '3';
6430 umod_optab->libcall_gen = gen_int_libfunc;
6431 ftrunc_optab->libcall_basename = "ftrunc";
6432 ftrunc_optab->libcall_suffix = '2';
6433 ftrunc_optab->libcall_gen = gen_fp_libfunc;
6434 and_optab->libcall_basename = "and";
6435 and_optab->libcall_suffix = '3';
6436 and_optab->libcall_gen = gen_int_libfunc;
6437 ior_optab->libcall_basename = "ior";
6438 ior_optab->libcall_suffix = '3';
6439 ior_optab->libcall_gen = gen_int_libfunc;
6440 xor_optab->libcall_basename = "xor";
6441 xor_optab->libcall_suffix = '3';
6442 xor_optab->libcall_gen = gen_int_libfunc;
6443 ashl_optab->libcall_basename = "ashl";
6444 ashl_optab->libcall_suffix = '3';
6445 ashl_optab->libcall_gen = gen_int_fixed_libfunc;
6446 ssashl_optab->libcall_basename = "ssashl";
6447 ssashl_optab->libcall_suffix = '3';
6448 ssashl_optab->libcall_gen = gen_signed_fixed_libfunc;
6449 usashl_optab->libcall_basename = "usashl";
6450 usashl_optab->libcall_suffix = '3';
6451 usashl_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6452 ashr_optab->libcall_basename = "ashr";
6453 ashr_optab->libcall_suffix = '3';
6454 ashr_optab->libcall_gen = gen_int_signed_fixed_libfunc;
6455 lshr_optab->libcall_basename = "lshr";
6456 lshr_optab->libcall_suffix = '3';
6457 lshr_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6458 smin_optab->libcall_basename = "min";
6459 smin_optab->libcall_suffix = '3';
6460 smin_optab->libcall_gen = gen_int_fp_libfunc;
6461 smax_optab->libcall_basename = "max";
6462 smax_optab->libcall_suffix = '3';
6463 smax_optab->libcall_gen = gen_int_fp_libfunc;
6464 umin_optab->libcall_basename = "umin";
6465 umin_optab->libcall_suffix = '3';
6466 umin_optab->libcall_gen = gen_int_libfunc;
6467 umax_optab->libcall_basename = "umax";
6468 umax_optab->libcall_suffix = '3';
6469 umax_optab->libcall_gen = gen_int_libfunc;
6470 neg_optab->libcall_basename = "neg";
6471 neg_optab->libcall_suffix = '2';
6472 neg_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6473 ssneg_optab->libcall_basename = "ssneg";
6474 ssneg_optab->libcall_suffix = '2';
6475 ssneg_optab->libcall_gen = gen_signed_fixed_libfunc;
6476 usneg_optab->libcall_basename = "usneg";
6477 usneg_optab->libcall_suffix = '2';
6478 usneg_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6479 negv_optab->libcall_basename = "neg";
6480 negv_optab->libcall_suffix = '2';
6481 negv_optab->libcall_gen = gen_intv_fp_libfunc;
6482 one_cmpl_optab->libcall_basename = "one_cmpl";
6483 one_cmpl_optab->libcall_suffix = '2';
6484 one_cmpl_optab->libcall_gen = gen_int_libfunc;
6485 ffs_optab->libcall_basename = "ffs";
6486 ffs_optab->libcall_suffix = '2';
6487 ffs_optab->libcall_gen = gen_int_libfunc;
6488 clz_optab->libcall_basename = "clz";
6489 clz_optab->libcall_suffix = '2';
6490 clz_optab->libcall_gen = gen_int_libfunc;
6491 ctz_optab->libcall_basename = "ctz";
6492 ctz_optab->libcall_suffix = '2';
6493 ctz_optab->libcall_gen = gen_int_libfunc;
6494 clrsb_optab->libcall_basename = "clrsb";
6495 clrsb_optab->libcall_suffix = '2';
6496 clrsb_optab->libcall_gen = gen_int_libfunc;
6497 popcount_optab->libcall_basename = "popcount";
6498 popcount_optab->libcall_suffix = '2';
6499 popcount_optab->libcall_gen = gen_int_libfunc;
6500 parity_optab->libcall_basename = "parity";
6501 parity_optab->libcall_suffix = '2';
6502 parity_optab->libcall_gen = gen_int_libfunc;
6504 /* Comparison libcalls for integers MUST come in pairs,
6505 signed/unsigned. */
6506 cmp_optab->libcall_basename = "cmp";
6507 cmp_optab->libcall_suffix = '2';
6508 cmp_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6509 ucmp_optab->libcall_basename = "ucmp";
6510 ucmp_optab->libcall_suffix = '2';
6511 ucmp_optab->libcall_gen = gen_int_libfunc;
6513 /* EQ etc are floating point only. */
6514 eq_optab->libcall_basename = "eq";
6515 eq_optab->libcall_suffix = '2';
6516 eq_optab->libcall_gen = gen_fp_libfunc;
6517 ne_optab->libcall_basename = "ne";
6518 ne_optab->libcall_suffix = '2';
6519 ne_optab->libcall_gen = gen_fp_libfunc;
6520 gt_optab->libcall_basename = "gt";
6521 gt_optab->libcall_suffix = '2';
6522 gt_optab->libcall_gen = gen_fp_libfunc;
6523 ge_optab->libcall_basename = "ge";
6524 ge_optab->libcall_suffix = '2';
6525 ge_optab->libcall_gen = gen_fp_libfunc;
6526 lt_optab->libcall_basename = "lt";
6527 lt_optab->libcall_suffix = '2';
6528 lt_optab->libcall_gen = gen_fp_libfunc;
6529 le_optab->libcall_basename = "le";
6530 le_optab->libcall_suffix = '2';
6531 le_optab->libcall_gen = gen_fp_libfunc;
6532 unord_optab->libcall_basename = "unord";
6533 unord_optab->libcall_suffix = '2';
6534 unord_optab->libcall_gen = gen_fp_libfunc;
6536 powi_optab->libcall_basename = "powi";
6537 powi_optab->libcall_suffix = '2';
6538 powi_optab->libcall_gen = gen_fp_libfunc;
6540 /* Conversions. */
6541 sfloat_optab->libcall_basename = "float";
6542 sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
6543 ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
6544 sfix_optab->libcall_basename = "fix";
6545 sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6546 ufix_optab->libcall_basename = "fixuns";
6547 ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6548 lrint_optab->libcall_basename = "lrint";
6549 lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6550 lround_optab->libcall_basename = "lround";
6551 lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6552 lfloor_optab->libcall_basename = "lfloor";
6553 lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6554 lceil_optab->libcall_basename = "lceil";
6555 lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6557 /* trunc_optab is also used for FLOAT_EXTEND. */
6558 sext_optab->libcall_basename = "extend";
6559 sext_optab->libcall_gen = gen_extend_conv_libfunc;
6560 trunc_optab->libcall_basename = "trunc";
6561 trunc_optab->libcall_gen = gen_trunc_conv_libfunc;
6563 /* Conversions for fixed-point modes and other modes. */
6564 fract_optab->libcall_basename = "fract";
6565 fract_optab->libcall_gen = gen_fract_conv_libfunc;
6566 satfract_optab->libcall_basename = "satfract";
6567 satfract_optab->libcall_gen = gen_satfract_conv_libfunc;
6568 fractuns_optab->libcall_basename = "fractuns";
6569 fractuns_optab->libcall_gen = gen_fractuns_conv_libfunc;
6570 satfractuns_optab->libcall_basename = "satfractuns";
6571 satfractuns_optab->libcall_gen = gen_satfractuns_conv_libfunc;
6573 /* The ffs function operates on `int'. Fall back on it if we do not
6574 have a libgcc2 function for that width. */
6575 if (INT_TYPE_SIZE < BITS_PER_WORD)
6576 set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
6577 "ffs");
6579 /* Explicitly initialize the bswap libfuncs since we need them to be
6580 valid for things other than word_mode. */
6581 if (targetm.libfunc_gnu_prefix)
6583 set_optab_libfunc (bswap_optab, SImode, "__gnu_bswapsi2");
6584 set_optab_libfunc (bswap_optab, DImode, "__gnu_bswapdi2");
6586 else
6588 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
6589 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
6592 /* Use cabs for double complex abs, since systems generally have cabs.
6593 Don't define any libcall for float complex, so that cabs will be used. */
6594 if (complex_double_type_node)
6595 set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");
6597 abort_libfunc = init_one_libfunc ("abort");
6598 memcpy_libfunc = init_one_libfunc ("memcpy");
6599 memmove_libfunc = init_one_libfunc ("memmove");
6600 memcmp_libfunc = init_one_libfunc ("memcmp");
6601 memset_libfunc = init_one_libfunc ("memset");
6602 setbits_libfunc = init_one_libfunc ("__setbits");
6604 #ifndef DONT_USE_BUILTIN_SETJMP
6605 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
6606 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
6607 #else
6608 setjmp_libfunc = init_one_libfunc ("setjmp");
6609 longjmp_libfunc = init_one_libfunc ("longjmp");
6610 #endif
6611 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
6612 unwind_sjlj_unregister_libfunc
6613 = init_one_libfunc ("_Unwind_SjLj_Unregister");
6615 /* For function entry/exit instrumentation. */
6616 profile_function_entry_libfunc
6617 = init_one_libfunc ("__cyg_profile_func_enter");
6618 profile_function_exit_libfunc
6619 = init_one_libfunc ("__cyg_profile_func_exit");
6621 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
6623 /* Allow the target to add more libcalls or rename some, etc. */
6624 targetm.init_libfuncs ();
6627 /* A helper function for init_sync_libfuncs. Using the basename BASE,
6628 install libfuncs into TAB for BASE_N for 1 <= N <= MAX. */
6630 static void
6631 init_sync_libfuncs_1 (optab tab, const char *base, int max)
6633 enum machine_mode mode;
6634 char buf[64];
6635 size_t len = strlen (base);
6636 int i;
6638 gcc_assert (max <= 8);
6639 gcc_assert (len + 3 < sizeof (buf));
6641 memcpy (buf, base, len);
6642 buf[len] = '_';
6643 buf[len + 1] = '0';
6644 buf[len + 2] = '\0';
6646 mode = QImode;
6647 for (i = 1; i <= max; i *= 2)
6649 buf[len + 1] = '0' + i;
6650 set_optab_libfunc (tab, mode, buf);
6651 mode = GET_MODE_2XWIDER_MODE (mode);
/* Install library-call fallbacks for the legacy __sync_* atomic
   primitives, for operand widths 1, 2, 4, ... up to MAX bytes.  */

void
init_sync_libfuncs (int max)
{
  /* Nothing to do when sync libcalls are disabled.  */
  if (!flag_sync_libcalls)
    return;

  /* Compare-and-swap and test-and-set.  */
  init_sync_libfuncs_1 (sync_compare_and_swap_optab,
			"__sync_val_compare_and_swap", max);
  init_sync_libfuncs_1 (sync_lock_test_and_set_optab,
			"__sync_lock_test_and_set", max);

  /* The fetch_and_OP family (old value returned).  */
  init_sync_libfuncs_1 (sync_old_add_optab, "__sync_fetch_and_add", max);
  init_sync_libfuncs_1 (sync_old_sub_optab, "__sync_fetch_and_sub", max);
  init_sync_libfuncs_1 (sync_old_ior_optab, "__sync_fetch_and_or", max);
  init_sync_libfuncs_1 (sync_old_and_optab, "__sync_fetch_and_and", max);
  init_sync_libfuncs_1 (sync_old_xor_optab, "__sync_fetch_and_xor", max);
  init_sync_libfuncs_1 (sync_old_nand_optab, "__sync_fetch_and_nand", max);

  /* The OP_and_fetch family (new value returned).  */
  init_sync_libfuncs_1 (sync_new_add_optab, "__sync_add_and_fetch", max);
  init_sync_libfuncs_1 (sync_new_sub_optab, "__sync_sub_and_fetch", max);
  init_sync_libfuncs_1 (sync_new_ior_optab, "__sync_or_and_fetch", max);
  init_sync_libfuncs_1 (sync_new_and_optab, "__sync_and_and_fetch", max);
  init_sync_libfuncs_1 (sync_new_xor_optab, "__sync_xor_and_fetch", max);
  init_sync_libfuncs_1 (sync_new_nand_optab, "__sync_nand_and_fetch", max);
}
6681 /* Print information about the current contents of the optabs on
6682 STDERR. */
6684 DEBUG_FUNCTION void
6685 debug_optab_libfuncs (void)
6687 int i;
6688 int j;
6689 int k;
6691 /* Dump the arithmetic optabs. */
6692 for (i = 0; i != (int) OTI_MAX; i++)
6693 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6695 optab o;
6696 rtx l;
6698 o = &optab_table[i];
6699 l = optab_libfunc (o, (enum machine_mode) j);
6700 if (l)
6702 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6703 fprintf (stderr, "%s\t%s:\t%s\n",
6704 GET_RTX_NAME (o->code),
6705 GET_MODE_NAME (j),
6706 XSTR (l, 0));
6710 /* Dump the conversion optabs. */
6711 for (i = 0; i < (int) COI_MAX; ++i)
6712 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6713 for (k = 0; k < NUM_MACHINE_MODES; ++k)
6715 convert_optab o;
6716 rtx l;
6718 o = &convert_optab_table[i];
6719 l = convert_optab_libfunc (o, (enum machine_mode) j,
6720 (enum machine_mode) k);
6721 if (l)
6723 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6724 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
6725 GET_RTX_NAME (o->code),
6726 GET_MODE_NAME (j),
6727 GET_MODE_NAME (k),
6728 XSTR (l, 0));
6734 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6735 CODE. Return 0 on failure. */
6738 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
6740 enum machine_mode mode = GET_MODE (op1);
6741 enum insn_code icode;
6742 rtx insn;
6743 rtx trap_rtx;
6745 if (mode == VOIDmode)
6746 return 0;
6748 icode = optab_handler (ctrap_optab, mode);
6749 if (icode == CODE_FOR_nothing)
6750 return 0;
6752 /* Some targets only accept a zero trap code. */
6753 if (!insn_operand_matches (icode, 3, tcode))
6754 return 0;
6756 do_pending_stack_adjust ();
6757 start_sequence ();
6758 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
6759 &trap_rtx, &mode);
6760 if (!trap_rtx)
6761 insn = NULL_RTX;
6762 else
6763 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
6764 tcode);
6766 /* If that failed, then give up. */
6767 if (insn == 0)
6769 end_sequence ();
6770 return 0;
6773 emit_insn (insn);
6774 insn = get_insns ();
6775 end_sequence ();
6776 return insn;
6779 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6780 or unsigned operation code. */
6782 static enum rtx_code
6783 get_rtx_code (enum tree_code tcode, bool unsignedp)
6785 enum rtx_code code;
6786 switch (tcode)
6788 case EQ_EXPR:
6789 code = EQ;
6790 break;
6791 case NE_EXPR:
6792 code = NE;
6793 break;
6794 case LT_EXPR:
6795 code = unsignedp ? LTU : LT;
6796 break;
6797 case LE_EXPR:
6798 code = unsignedp ? LEU : LE;
6799 break;
6800 case GT_EXPR:
6801 code = unsignedp ? GTU : GT;
6802 break;
6803 case GE_EXPR:
6804 code = unsignedp ? GEU : GE;
6805 break;
6807 case UNORDERED_EXPR:
6808 code = UNORDERED;
6809 break;
6810 case ORDERED_EXPR:
6811 code = ORDERED;
6812 break;
6813 case UNLT_EXPR:
6814 code = UNLT;
6815 break;
6816 case UNLE_EXPR:
6817 code = UNLE;
6818 break;
6819 case UNGT_EXPR:
6820 code = UNGT;
6821 break;
6822 case UNGE_EXPR:
6823 code = UNGE;
6824 break;
6825 case UNEQ_EXPR:
6826 code = UNEQ;
6827 break;
6828 case LTGT_EXPR:
6829 code = LTGT;
6830 break;
6832 default:
6833 gcc_unreachable ();
6835 return code;
6838 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
6839 unsigned operators. Do not generate compare instruction. */
6841 static rtx
6842 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6844 struct expand_operand ops[2];
6845 enum rtx_code rcode;
6846 tree t_op0, t_op1;
6847 rtx rtx_op0, rtx_op1;
6849 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
6850 ensures that condition is a relational operation. */
6851 gcc_assert (COMPARISON_CLASS_P (cond));
6853 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6854 t_op0 = TREE_OPERAND (cond, 0);
6855 t_op1 = TREE_OPERAND (cond, 1);
6857 /* Expand operands. */
6858 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6859 EXPAND_STACK_PARM);
6860 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
6861 EXPAND_STACK_PARM);
6863 create_input_operand (&ops[0], rtx_op0, GET_MODE (rtx_op0));
6864 create_input_operand (&ops[1], rtx_op1, GET_MODE (rtx_op1));
6865 if (!maybe_legitimize_operands (icode, 4, 2, ops))
6866 gcc_unreachable ();
6867 return gen_rtx_fmt_ee (rcode, VOIDmode, ops[0].value, ops[1].value);
6870 /* Return true if VEC_PERM_EXPR can be expanded using SIMD extensions
6871 of the CPU. SEL may be NULL, which stands for an unknown constant. */
6873 bool
6874 can_vec_perm_p (enum machine_mode mode, bool variable,
6875 const unsigned char *sel)
6877 enum machine_mode qimode;
6879 /* If the target doesn't implement a vector mode for the vector type,
6880 then no operations are supported. */
6881 if (!VECTOR_MODE_P (mode))
6882 return false;
6884 if (!variable)
6886 if (direct_optab_handler (vec_perm_const_optab, mode) != CODE_FOR_nothing
6887 && (sel == NULL
6888 || targetm.vectorize.vec_perm_const_ok == NULL
6889 || targetm.vectorize.vec_perm_const_ok (mode, sel)))
6890 return true;
6893 if (direct_optab_handler (vec_perm_optab, mode) != CODE_FOR_nothing)
6894 return true;
6896 /* We allow fallback to a QI vector mode, and adjust the mask. */
6897 if (GET_MODE_INNER (mode) == QImode)
6898 return false;
6899 qimode = mode_for_vector (QImode, GET_MODE_SIZE (mode));
6900 if (!VECTOR_MODE_P (qimode))
6901 return false;
6903 /* ??? For completeness, we ought to check the QImode version of
6904 vec_perm_const_optab. But all users of this implicit lowering
6905 feature implement the variable vec_perm_optab. */
6906 if (direct_optab_handler (vec_perm_optab, qimode) == CODE_FOR_nothing)
6907 return false;
6909 /* In order to support the lowering of variable permutations,
6910 we need to support shifts and adds. */
6911 if (variable)
6913 if (GET_MODE_UNIT_SIZE (mode) > 2
6914 && optab_handler (ashl_optab, mode) == CODE_FOR_nothing
6915 && optab_handler (vashl_optab, mode) == CODE_FOR_nothing)
6916 return false;
6917 if (optab_handler (add_optab, qimode) == CODE_FOR_nothing)
6918 return false;
6921 return true;
6924 /* A subroutine of expand_vec_perm for expanding one vec_perm insn. */
6926 static rtx
6927 expand_vec_perm_1 (enum insn_code icode, rtx target,
6928 rtx v0, rtx v1, rtx sel)
6930 enum machine_mode tmode = GET_MODE (target);
6931 enum machine_mode smode = GET_MODE (sel);
6932 struct expand_operand ops[4];
6934 create_output_operand (&ops[0], target, tmode);
6935 create_input_operand (&ops[3], sel, smode);
6937 /* Make an effort to preserve v0 == v1. The target expander is able to
6938 rely on this to determine if we're permuting a single input operand. */
6939 if (rtx_equal_p (v0, v1))
6941 if (!insn_operand_matches (icode, 1, v0))
6942 v0 = force_reg (tmode, v0);
6943 gcc_checking_assert (insn_operand_matches (icode, 1, v0));
6944 gcc_checking_assert (insn_operand_matches (icode, 2, v0));
6946 create_fixed_operand (&ops[1], v0);
6947 create_fixed_operand (&ops[2], v0);
6949 else
6951 create_input_operand (&ops[1], v0, tmode);
6952 create_input_operand (&ops[2], v1, tmode);
6955 if (maybe_expand_insn (icode, 4, ops))
6956 return ops[0].value;
6957 return NULL_RTX;
/* Generate instructions for vec_perm optab given its mode
   and three operands.  V0 and V1 are the two input vectors, SEL the
   selector (an integral vector), TARGET the preferred destination (may
   be NULL or of the wrong mode).  Return the result rtx, or NULL_RTX on
   failure.  */

rtx
expand_vec_perm (enum machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
{
  enum insn_code icode;
  enum machine_mode qimode;
  unsigned int i, w, e, u;
  rtx tmp, sel_qi = NULL;
  rtvec vec;

  if (!target || GET_MODE (target) != mode)
    target = gen_reg_rtx (mode);

  /* W = total size in bytes, E = element count, U = bytes per element.  */
  w = GET_MODE_SIZE (mode);
  e = GET_MODE_NUNITS (mode);
  u = GET_MODE_UNIT_SIZE (mode);

  /* Set QIMODE to a different vector mode with byte elements.
     If no such mode, or if MODE already has byte elements, use VOIDmode.  */
  qimode = VOIDmode;
  if (GET_MODE_INNER (mode) != QImode)
    {
      qimode = mode_for_vector (QImode, w);
      if (!VECTOR_MODE_P (qimode))
	qimode = VOIDmode;
    }

  /* If the input is a constant, expand it specially.  */
  gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
  if (GET_CODE (sel) == CONST_VECTOR)
    {
      icode = direct_optab_handler (vec_perm_const_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
	  if (tmp)
	    return tmp;
	}

      /* Fall back to a constant byte-based permutation.  */
      if (qimode != VOIDmode)
	{
	  /* Expand each element index into U consecutive byte indices.  */
	  vec = rtvec_alloc (w);
	  for (i = 0; i < e; ++i)
	    {
	      unsigned int j, this_e;

	      this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
	      /* Indices into the concatenation of V0 and V1 wrap at 2*E.  */
	      this_e &= 2 * e - 1;
	      this_e *= u;

	      for (j = 0; j < u; ++j)
		RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
	    }
	  sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);

	  icode = direct_optab_handler (vec_perm_const_optab, qimode);
	  if (icode != CODE_FOR_nothing)
	    {
	      tmp = expand_vec_perm_1 (icode, gen_lowpart (qimode, target),
				       gen_lowpart (qimode, v0),
				       gen_lowpart (qimode, v1), sel_qi);
	      if (tmp)
		return gen_lowpart (mode, tmp);
	    }
	}
    }

  /* Otherwise expand as a fully variable permutation.  */
  icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
      if (tmp)
	return tmp;
    }

  /* As a special case to aid several targets, lower the element-based
     permutation to a byte-based permutation and try again.  */
  if (qimode == VOIDmode)
    return NULL_RTX;
  icode = direct_optab_handler (vec_perm_optab, qimode);
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  /* SEL_QI was already computed above if SEL was a CONST_VECTOR;
     otherwise compute the byte selector at runtime.  */
  if (sel_qi == NULL)
    {
      /* Multiply each element by its byte size.  */
      enum machine_mode selmode = GET_MODE (sel);
      if (u == 2)
	sel = expand_simple_binop (selmode, PLUS, sel, sel,
				   sel, 0, OPTAB_DIRECT);
      else
	sel = expand_simple_binop (selmode, ASHIFT, sel,
				   GEN_INT (exact_log2 (u)),
				   sel, 0, OPTAB_DIRECT);
      gcc_assert (sel != NULL);

      /* Broadcast the low byte each element into each of its bytes.
	 This is a recursive call, but with a QImode-element selector
	 it cannot recurse a second time.  */
      vec = rtvec_alloc (w);
      for (i = 0; i < w; ++i)
	{
	  int this_e = i / u * u;
	  if (BYTES_BIG_ENDIAN)
	    this_e += u - 1;
	  RTVEC_ELT (vec, i) = GEN_INT (this_e);
	}
      tmp = gen_rtx_CONST_VECTOR (qimode, vec);
      sel = gen_lowpart (qimode, sel);
      sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
      gcc_assert (sel != NULL);

      /* Add the byte offset to each byte element.  */
      /* Note that the definition of the indices here is memory ordering,
	 so there should be no difference between big and little endian.  */
      vec = rtvec_alloc (w);
      for (i = 0; i < w; ++i)
	RTVEC_ELT (vec, i) = GEN_INT (i % u);
      tmp = gen_rtx_CONST_VECTOR (qimode, vec);
      sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
				    sel, 0, OPTAB_DIRECT);
      gcc_assert (sel_qi != NULL);
    }

  tmp = expand_vec_perm_1 (icode, gen_lowpart (qimode, target),
			   gen_lowpart (qimode, v0),
			   gen_lowpart (qimode, v1), sel_qi);
  if (tmp)
    tmp = gen_lowpart (mode, tmp);
  return tmp;
}
7094 /* Return insn code for a conditional operator with a comparison in
7095 mode CMODE, unsigned if UNS is true, resulting in a value of mode VMODE. */
7097 static inline enum insn_code
7098 get_vcond_icode (enum machine_mode vmode, enum machine_mode cmode, bool uns)
7100 enum insn_code icode = CODE_FOR_nothing;
7101 if (uns)
7102 icode = convert_optab_handler (vcondu_optab, vmode, cmode);
7103 else
7104 icode = convert_optab_handler (vcond_optab, vmode, cmode);
7105 return icode;
7108 /* Return TRUE iff, appropriate vector insns are available
7109 for vector cond expr with vector type VALUE_TYPE and a comparison
7110 with operand vector types in CMP_OP_TYPE. */
7112 bool
7113 expand_vec_cond_expr_p (tree value_type, tree cmp_op_type)
7115 enum machine_mode value_mode = TYPE_MODE (value_type);
7116 enum machine_mode cmp_op_mode = TYPE_MODE (cmp_op_type);
7117 if (GET_MODE_SIZE (value_mode) != GET_MODE_SIZE (cmp_op_mode)
7118 || GET_MODE_NUNITS (value_mode) != GET_MODE_NUNITS (cmp_op_mode)
7119 || get_vcond_icode (TYPE_MODE (value_type), TYPE_MODE (cmp_op_type),
7120 TYPE_UNSIGNED (cmp_op_type)) == CODE_FOR_nothing)
7121 return false;
7122 return true;
7125 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
7126 three operands. */
7129 expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
7130 rtx target)
7132 struct expand_operand ops[6];
7133 enum insn_code icode;
7134 rtx comparison, rtx_op1, rtx_op2;
7135 enum machine_mode mode = TYPE_MODE (vec_cond_type);
7136 enum machine_mode cmp_op_mode;
7137 bool unsignedp;
7139 gcc_assert (COMPARISON_CLASS_P (op0));
7141 unsignedp = TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op0, 0)));
7142 cmp_op_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (op0, 0)));
7144 gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
7145 && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));
7147 icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
7148 if (icode == CODE_FOR_nothing)
7149 return 0;
7151 comparison = vector_compare_rtx (op0, unsignedp, icode);
7152 rtx_op1 = expand_normal (op1);
7153 rtx_op2 = expand_normal (op2);
7155 create_output_operand (&ops[0], target, mode);
7156 create_input_operand (&ops[1], rtx_op1, mode);
7157 create_input_operand (&ops[2], rtx_op2, mode);
7158 create_fixed_operand (&ops[3], comparison);
7159 create_fixed_operand (&ops[4], XEXP (comparison, 0));
7160 create_fixed_operand (&ops[5], XEXP (comparison, 1));
7161 expand_insn (icode, 6, ops);
7162 return ops[0].value;
7165 /* Return non-zero if a highpart multiply is supported of can be synthisized.
7166 For the benefit of expand_mult_highpart, the return value is 1 for direct,
7167 2 for even/odd widening, and 3 for hi/lo widening. */
7170 can_mult_highpart_p (enum machine_mode mode, bool uns_p)
7172 optab op;
7173 unsigned char *sel;
7174 unsigned i, nunits;
7176 op = uns_p ? umul_highpart_optab : smul_highpart_optab;
7177 if (optab_handler (op, mode) != CODE_FOR_nothing)
7178 return 1;
7180 /* If the mode is an integral vector, synth from widening operations. */
7181 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
7182 return 0;
7184 nunits = GET_MODE_NUNITS (mode);
7185 sel = XALLOCAVEC (unsigned char, nunits);
7187 op = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
7188 if (optab_handler (op, mode) != CODE_FOR_nothing)
7190 op = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
7191 if (optab_handler (op, mode) != CODE_FOR_nothing)
7193 for (i = 0; i < nunits; ++i)
7194 sel[i] = !BYTES_BIG_ENDIAN + (i & ~1) + ((i & 1) ? nunits : 0);
7195 if (can_vec_perm_p (mode, false, sel))
7196 return 2;
7200 op = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
7201 if (optab_handler (op, mode) != CODE_FOR_nothing)
7203 op = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
7204 if (optab_handler (op, mode) != CODE_FOR_nothing)
7206 for (i = 0; i < nunits; ++i)
7207 sel[i] = 2 * i + (BYTES_BIG_ENDIAN ? 0 : 1);
7208 if (can_vec_perm_p (mode, false, sel))
7209 return 3;
7213 return 0;
/* Expand a highpart multiply of OP0 by OP1 in MODE, unsigned if UNS_P,
   preferring TARGET as the destination.  Return the result rtx, or
   NULL_RTX if can_mult_highpart_p reported no way to do it.  */

rtx
expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
		      rtx target, bool uns_p)
{
  struct expand_operand eops[3];
  enum insn_code icode;
  int method, i, nunits;
  enum machine_mode wmode;
  rtx m1, m2, perm;
  optab tab1, tab2;
  rtvec v;

  /* METHOD uses the same encoding as can_mult_highpart_p's return
     value: 1 direct, 2 even/odd widening, 3 hi/lo widening.  */
  method = can_mult_highpart_p (mode, uns_p);
  switch (method)
    {
    case 0:
      return NULL_RTX;
    case 1:
      tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
      return expand_binop (mode, tab1, op0, op1, target, uns_p,
			   OPTAB_LIB_WIDEN);
    case 2:
      tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
      tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
      break;
    case 3:
      tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
      tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
      /* On big-endian targets the hi half comes first in memory order,
	 so swap the pair.  */
      if (BYTES_BIG_ENDIAN)
	{
	  optab t = tab1;
	  tab1 = tab2;
	  tab2 = t;
	}
      break;
    default:
      gcc_unreachable ();
    }

  icode = optab_handler (tab1, mode);
  nunits = GET_MODE_NUNITS (mode);
  /* The widened result mode has half as many, double-width elements.  */
  wmode = insn_data[icode].operand[0].mode;
  gcc_checking_assert (2 * GET_MODE_NUNITS (wmode) == nunits);
  gcc_checking_assert (GET_MODE_SIZE (wmode) == GET_MODE_SIZE (mode));

  /* Emit the first widening multiply (even or lo half).  */
  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (icode, 3, eops);
  m1 = gen_lowpart (mode, eops[0].value);

  /* Emit the second widening multiply (odd or hi half).  */
  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (optab_handler (tab2, mode), 3, eops);
  m2 = gen_lowpart (mode, eops[0].value);

  /* Build the permutation that picks the high half of each widened
     product out of M1 and M2; it matches the selector that
     can_mult_highpart_p validated.  */
  v = rtvec_alloc (nunits);
  if (method == 2)
    {
      for (i = 0; i < nunits; ++i)
	RTVEC_ELT (v, i) = GEN_INT (!BYTES_BIG_ENDIAN + (i & ~1)
				    + ((i & 1) ? nunits : 0));
    }
  else
    {
      for (i = 0; i < nunits; ++i)
	RTVEC_ELT (v, i) = GEN_INT (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
    }
  perm = gen_rtx_CONST_VECTOR (mode, v);

  return expand_vec_perm (mode, m1, m2, perm, target);
}
7292 /* Return true if there is a compare_and_swap pattern. */
7294 bool
7295 can_compare_and_swap_p (enum machine_mode mode, bool allow_libcall)
7297 enum insn_code icode;
7299 /* Check for __atomic_compare_and_swap. */
7300 icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
7301 if (icode != CODE_FOR_nothing)
7302 return true;
7304 /* Check for __sync_compare_and_swap. */
7305 icode = optab_handler (sync_compare_and_swap_optab, mode);
7306 if (icode != CODE_FOR_nothing)
7307 return true;
7308 if (allow_libcall && optab_libfunc (sync_compare_and_swap_optab, mode))
7309 return true;
7311 /* No inline compare and swap. */
7312 return false;
7315 /* Return true if an atomic exchange can be performed. */
7317 bool
7318 can_atomic_exchange_p (enum machine_mode mode, bool allow_libcall)
7320 enum insn_code icode;
7322 /* Check for __atomic_exchange. */
7323 icode = direct_optab_handler (atomic_exchange_optab, mode);
7324 if (icode != CODE_FOR_nothing)
7325 return true;
7327 /* Don't check __sync_test_and_set, as on some platforms that
7328 has reduced functionality. Targets that really do support
7329 a proper exchange should simply be updated to the __atomics. */
7331 return can_compare_and_swap_p (mode, allow_libcall);
7335 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
7336 pattern. */
7338 static void
7339 find_cc_set (rtx x, const_rtx pat, void *data)
7341 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
7342 && GET_CODE (pat) == SET)
7344 rtx *p_cc_reg = (rtx *) data;
7345 gcc_assert (!*p_cc_reg);
7346 *p_cc_reg = x;
7350 /* This is a helper function for the other atomic operations. This function
7351 emits a loop that contains SEQ that iterates until a compare-and-swap
7352 operation at the end succeeds. MEM is the memory to be modified. SEQ is
7353 a set of instructions that takes a value from OLD_REG as an input and
7354 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
7355 set to the current contents of MEM. After SEQ, a compare-and-swap will
7356 attempt to update MEM with NEW_REG. The function returns true when the
7357 loop was generated successfully. */
7359 static bool
7360 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
7362 enum machine_mode mode = GET_MODE (mem);
7363 rtx label, cmp_reg, success, oldval;
7365 /* The loop we want to generate looks like
7367 cmp_reg = mem;
7368 label:
7369 old_reg = cmp_reg;
7370 seq;
7371 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
7372 if (success)
7373 goto label;
7375 Note that we only do the plain load from memory once. Subsequent
7376 iterations use the value loaded by the compare-and-swap pattern. */
7378 label = gen_label_rtx ();
7379 cmp_reg = gen_reg_rtx (mode);
7381 emit_move_insn (cmp_reg, mem);
7382 emit_label (label);
7383 emit_move_insn (old_reg, cmp_reg);
7384 if (seq)
7385 emit_insn (seq);
7387 success = NULL_RTX;
7388 oldval = cmp_reg;
7389 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
7390 new_reg, false, MEMMODEL_SEQ_CST,
7391 MEMMODEL_RELAXED))
7392 return false;
7394 if (oldval != cmp_reg)
7395 emit_move_insn (cmp_reg, oldval);
7397 /* ??? Mark this jump predicted not taken? */
7398 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
7399 GET_MODE (success), 1, label);
7400 return true;
7404 /* This function tries to emit an atomic_exchange intruction. VAL is written
7405 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
7406 using TARGET if possible. */
7408 static rtx
7409 maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
7411 enum machine_mode mode = GET_MODE (mem);
7412 enum insn_code icode;
7414 /* If the target supports the exchange directly, great. */
7415 icode = direct_optab_handler (atomic_exchange_optab, mode);
7416 if (icode != CODE_FOR_nothing)
7418 struct expand_operand ops[4];
7420 create_output_operand (&ops[0], target, mode);
7421 create_fixed_operand (&ops[1], mem);
7422 /* VAL may have been promoted to a wider mode. Shrink it if so. */
7423 create_convert_operand_to (&ops[2], val, mode, true);
7424 create_integer_operand (&ops[3], model);
7425 if (maybe_expand_insn (icode, 4, ops))
7426 return ops[0].value;
7429 return NULL_RTX;
7432 /* This function tries to implement an atomic exchange operation using
7433 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
7434 The previous contents of *MEM are returned, using TARGET if possible.
7435 Since this instructionn is an acquire barrier only, stronger memory
7436 models may require additional barriers to be emitted. */
7438 static rtx
7439 maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
7440 enum memmodel model)
7442 enum machine_mode mode = GET_MODE (mem);
7443 enum insn_code icode;
7444 rtx last_insn = get_last_insn ();
7446 icode = optab_handler (sync_lock_test_and_set_optab, mode);
7448 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
7449 exists, and the memory model is stronger than acquire, add a release
7450 barrier before the instruction. */
7452 if (model == MEMMODEL_SEQ_CST
7453 || model == MEMMODEL_RELEASE
7454 || model == MEMMODEL_ACQ_REL)
7455 expand_mem_thread_fence (model);
7457 if (icode != CODE_FOR_nothing)
7459 struct expand_operand ops[3];
7460 create_output_operand (&ops[0], target, mode);
7461 create_fixed_operand (&ops[1], mem);
7462 /* VAL may have been promoted to a wider mode. Shrink it if so. */
7463 create_convert_operand_to (&ops[2], val, mode, true);
7464 if (maybe_expand_insn (icode, 3, ops))
7465 return ops[0].value;
7468 /* If an external test-and-set libcall is provided, use that instead of
7469 any external compare-and-swap that we might get from the compare-and-
7470 swap-loop expansion later. */
7471 if (!can_compare_and_swap_p (mode, false))
7473 rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
7474 if (libfunc != NULL)
7476 rtx addr;
7478 addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
7479 return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
7480 mode, 2, addr, ptr_mode,
7481 val, mode);
7485 /* If the test_and_set can't be emitted, eliminate any barrier that might
7486 have been emitted. */
7487 delete_insns_since (last_insn);
7488 return NULL_RTX;
7491 /* This function tries to implement an atomic exchange operation using a
7492 compare_and_swap loop. VAL is written to *MEM. The previous contents of
7493 *MEM are returned, using TARGET if possible. No memory model is required
7494 since a compare_and_swap loop is seq-cst. */
7496 static rtx
7497 maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
7499 enum machine_mode mode = GET_MODE (mem);
7501 if (can_compare_and_swap_p (mode, true))
7503 if (!target || !register_operand (target, mode))
7504 target = gen_reg_rtx (mode);
7505 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7506 val = convert_modes (mode, GET_MODE (val), val, 1);
7507 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
7508 return target;
7511 return NULL_RTX;
7514 /* This function tries to implement an atomic test-and-set operation
7515 using the atomic_test_and_set instruction pattern. A boolean value
7516 is returned from the operation, using TARGET if possible. */
7518 #ifndef HAVE_atomic_test_and_set
7519 #define HAVE_atomic_test_and_set 0
7520 #define CODE_FOR_atomic_test_and_set CODE_FOR_nothing
7521 #endif
7523 static rtx
7524 maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
7526 enum machine_mode pat_bool_mode;
7527 struct expand_operand ops[3];
7529 if (!HAVE_atomic_test_and_set)
7530 return NULL_RTX;
7532 /* While we always get QImode from __atomic_test_and_set, we get
7533 other memory modes from __sync_lock_test_and_set. Note that we
7534 use no endian adjustment here. This matches the 4.6 behavior
7535 in the Sparc backend. */
7536 gcc_checking_assert
7537 (insn_data[CODE_FOR_atomic_test_and_set].operand[1].mode == QImode);
7538 if (GET_MODE (mem) != QImode)
7539 mem = adjust_address_nv (mem, QImode, 0);
7541 pat_bool_mode = insn_data[CODE_FOR_atomic_test_and_set].operand[0].mode;
7542 create_output_operand (&ops[0], target, pat_bool_mode);
7543 create_fixed_operand (&ops[1], mem);
7544 create_integer_operand (&ops[2], model);
7546 if (maybe_expand_insn (CODE_FOR_atomic_test_and_set, 3, ops))
7547 return ops[0].value;
7548 return NULL_RTX;
7551 /* This function expands the legacy _sync_lock test_and_set operation which is
7552 generally an atomic exchange. Some limited targets only allow the
7553 constant 1 to be stored. This is an ACQUIRE operation.
7555 TARGET is an optional place to stick the return value.
7556 MEM is where VAL is stored. */
7559 expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
7561 rtx ret;
7563 /* Try an atomic_exchange first. */
7564 ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_ACQUIRE);
7565 if (ret)
7566 return ret;
7568 ret = maybe_emit_sync_lock_test_and_set (target, mem, val, MEMMODEL_ACQUIRE);
7569 if (ret)
7570 return ret;
7572 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
7573 if (ret)
7574 return ret;
7576 /* If there are no other options, try atomic_test_and_set if the value
7577 being stored is 1. */
7578 if (val == const1_rtx)
7579 ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_ACQUIRE);
7581 return ret;
7584 /* This function expands the atomic test_and_set operation:
7585 atomically store a boolean TRUE into MEM and return the previous value.
7587 MEMMODEL is the memory model variant to use.
7588 TARGET is an optional place to stick the return value. */
/* NOTE(review): the return-type line is not visible in this listing;
   presumably rtx (the previous value as a 0/1 flag) -- confirm against
   the declaration in optabs.h.  */
7591 expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
7593 enum machine_mode mode = GET_MODE (mem);
7594 rtx ret, trueval, subtarget;
/* First choice: the direct atomic_test_and_set pattern.  */
7596 ret = maybe_emit_atomic_test_and_set (target, mem, model);
7597 if (ret)
7598 return ret;
7600 /* Be binary compatible with non-default settings of trueval, and different
7601 cpu revisions. E.g. one revision may have atomic-test-and-set, but
7602 another only has atomic-exchange. */
7603 if (targetm.atomic_test_and_set_trueval == 1)
/* Trueval is already the boolean 1; the result can go straight to
   TARGET when the caller supplied one.  */
7605 trueval = const1_rtx;
7606 subtarget = target ? target : gen_reg_rtx (mode);
7608 else
/* Non-unit trueval: exchange that raw value into MEM, then normalize
   the result to 0/1 at the end (see emit_store_flag_force below).  */
7610 trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
7611 subtarget = gen_reg_rtx (mode);
7614 /* Try the atomic-exchange optab... */
7615 ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);
7617 /* ... then an atomic-compare-and-swap loop ... */
7618 if (!ret)
7619 ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);
7621 /* ... before trying the vaguely defined legacy lock_test_and_set. */
7622 if (!ret)
7623 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);
7625 /* Recall that the legacy lock_test_and_set optab was allowed to do magic
7626 things with the value 1. Thus we try again without trueval. */
7627 if (!ret && targetm.atomic_test_and_set_trueval != 1)
7628 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);
7630 /* Failing all else, assume a single threaded environment and simply
7631 perform the operation. */
7632 if (!ret)
7634 emit_move_insn (subtarget, mem);
7635 emit_move_insn (mem, trueval);
7636 ret = subtarget;
7639 /* Recall that we have to return a boolean value; rectify if trueval
7640 is not exactly one. */
7641 if (targetm.atomic_test_and_set_trueval != 1)
7642 ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1)
7644 return ret;
7647 /* This function expands the atomic exchange operation:
7648 atomically store VAL in MEM and return the previous value in MEM.
7650 MEMMODEL is the memory model variant to use.
7651 TARGET is an optional place to stick the return value. */
7654 expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
7656 rtx ret;
7658 ret = maybe_emit_atomic_exchange (target, mem, val, model);
7660 /* Next try a compare-and-swap loop for the exchange. */
7661 if (!ret)
7662 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
7664 return ret;
7667 /* This function expands the atomic compare exchange operation:
7669 *PTARGET_BOOL is an optional place to store the boolean success/failure.
7670 *PTARGET_OVAL is an optional place to store the old value from memory.
7671 Both target parameters may be NULL to indicate that we do not care about
7672 that return value. Both target parameters are updated on success to
7673 the actual location of the corresponding result.
7675 MEMMODEL is the memory model variant to use.
7677 The return value of the function is true for success. */
7679 bool
7680 expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
7681 rtx mem, rtx expected, rtx desired,
7682 bool is_weak, enum memmodel succ_model,
7683 enum memmodel fail_model)
7685 enum machine_mode mode = GET_MODE (mem);
7686 struct expand_operand ops[8];
7687 enum insn_code icode;
7688 rtx target_oval, target_bool = NULL_RTX;
7689 rtx libfunc;
7691 /* Load expected into a register for the compare and swap. */
7692 if (MEM_P (expected))
7693 expected = copy_to_reg (expected);
7695 /* Make sure we always have some place to put the return oldval.
7696 Further, make sure that place is distinct from the input expected,
7697 just in case we need that path down below. */
7698 if (ptarget_oval == NULL
7699 || (target_oval = *ptarget_oval) == NULL
7700 || reg_overlap_mentioned_p (expected, target_oval))
7701 target_oval = gen_reg_rtx (mode);
/* Strategy 1: the __atomic compare-and-swap pattern, which produces
   both the boolean result and the old value directly.  */
7703 icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
7704 if (icode != CODE_FOR_nothing)
7706 enum machine_mode bool_mode = insn_data[icode].operand[0].mode;
7708 /* Make sure we always have a place for the bool operand. */
7709 if (ptarget_bool == NULL
7710 || (target_bool = *ptarget_bool) == NULL
7711 || GET_MODE (target_bool) != bool_mode)
7712 target_bool = gen_reg_rtx (bool_mode);
7714 /* Emit the compare_and_swap. */
7715 create_output_operand (&ops[0], target_bool, bool_mode);
7716 create_output_operand (&ops[1], target_oval, mode);
7717 create_fixed_operand (&ops[2], mem);
7718 create_convert_operand_to (&ops[3], expected, mode, true);
7719 create_convert_operand_to (&ops[4], desired, mode, true);
7720 create_integer_operand (&ops[5], is_weak);
7721 create_integer_operand (&ops[6], succ_model);
7722 create_integer_operand (&ops[7], fail_model);
7723 expand_insn (icode, 8, ops);
7725 /* Return success/failure. */
7726 target_bool = ops[0].value;
7727 target_oval = ops[1].value;
7728 goto success;
/* Strategy 2: the legacy __sync pattern, which only produces the old
   value; the boolean must be reconstructed afterwards.  */
7731 /* Otherwise fall back to the original __sync_val_compare_and_swap
7732 which is always seq-cst. */
7733 icode = optab_handler (sync_compare_and_swap_optab, mode);
7734 if (icode != CODE_FOR_nothing)
7736 rtx cc_reg;
7738 create_output_operand (&ops[0], target_oval, mode);
7739 create_fixed_operand (&ops[1], mem);
7740 create_convert_operand_to (&ops[2], expected, mode, true);
7741 create_convert_operand_to (&ops[3], desired, mode, true);
7742 if (!maybe_expand_insn (icode, 4, ops))
7743 return false;
7745 target_oval = ops[0].value;
7747 /* If the caller isn't interested in the boolean return value,
7748 skip the computation of it. */
7749 if (ptarget_bool == NULL)
7750 goto success;
7752 /* Otherwise, work out if the compare-and-swap succeeded. */
7753 cc_reg = NULL_RTX;
/* Cheap path: if the pattern left a condition-code register set,
   derive the boolean from it rather than re-comparing the values.  */
7754 if (have_insn_for (COMPARE, CCmode))
7755 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
7756 if (cc_reg)
7758 target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
7759 const0_rtx, VOIDmode, 0, 1);
7760 goto success;
7762 goto success_bool_from_val;
/* Strategy 3: an out-of-line __sync_val_compare_and_swap call.  */
7765 /* Also check for library support for __sync_val_compare_and_swap. */
7766 libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
7767 if (libfunc != NULL)
7769 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
7770 target_oval = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
7771 mode, 3, addr, ptr_mode,
7772 expected, mode, desired, mode);
7774 /* Compute the boolean return value only if requested. */
7775 if (ptarget_bool)
7776 goto success_bool_from_val;
7777 else
7778 goto success;
7781 /* Failure. */
7782 return false;
/* Reconstruct the boolean by comparing the returned old value against
   EXPECTED: equality means the swap took place.  */
7784 success_bool_from_val:
7785 target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
7786 expected, VOIDmode, 1, 1);
7787 success:
7788 /* Make sure that the oval output winds up where the caller asked. */
7789 if (ptarget_oval)
7790 *ptarget_oval = target_oval;
7791 if (ptarget_bool)
7792 *ptarget_bool = target_bool;
7793 return true;
7796 /* Generate asm volatile("" : : : "memory") as the memory barrier. */
7798 static void
7799 expand_asm_memory_barrier (void)
7801 rtx asm_op, clob;
7803 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, empty_string, empty_string, 0,
7804 rtvec_alloc (0), rtvec_alloc (0),
7805 rtvec_alloc (0), UNKNOWN_LOCATION);
7806 MEM_VOLATILE_P (asm_op) = 1;
7808 clob = gen_rtx_SCRATCH (VOIDmode);
7809 clob = gen_rtx_MEM (BLKmode, clob);
7810 clob = gen_rtx_CLOBBER (VOIDmode, clob);
7812 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
7815 /* This routine will either emit the mem_thread_fence pattern or issue a
7816 sync_synchronize to generate a fence for memory model MEMMODEL. */
7818 #ifndef HAVE_mem_thread_fence
7819 # define HAVE_mem_thread_fence 0
7820 # define gen_mem_thread_fence(x) (gcc_unreachable (), NULL_RTX)
7821 #endif
7822 #ifndef HAVE_memory_barrier
7823 # define HAVE_memory_barrier 0
7824 # define gen_memory_barrier() (gcc_unreachable (), NULL_RTX)
7825 #endif
7827 void
7828 expand_mem_thread_fence (enum memmodel model)
7830 if (HAVE_mem_thread_fence)
7831 emit_insn (gen_mem_thread_fence (GEN_INT (model)));
7832 else if (model != MEMMODEL_RELAXED)
7834 if (HAVE_memory_barrier)
7835 emit_insn (gen_memory_barrier ());
7836 else if (synchronize_libfunc != NULL_RTX)
7837 emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
7838 else
7839 expand_asm_memory_barrier ();
7843 /* This routine will either emit the mem_signal_fence pattern or issue a
7844 sync_synchronize to generate a fence for memory model MEMMODEL. */
7846 #ifndef HAVE_mem_signal_fence
7847 # define HAVE_mem_signal_fence 0
7848 # define gen_mem_signal_fence(x) (gcc_unreachable (), NULL_RTX)
7849 #endif
7851 void
7852 expand_mem_signal_fence (enum memmodel model)
7854 if (HAVE_mem_signal_fence)
7855 emit_insn (gen_mem_signal_fence (GEN_INT (model)));
7856 else if (model != MEMMODEL_RELAXED)
7858 /* By default targets are coherent between a thread and the signal
7859 handler running on the same thread. Thus this really becomes a
7860 compiler barrier, in that stores must not be sunk past
7861 (or raised above) a given point. */
7862 expand_asm_memory_barrier ();
7866 /* This function expands the atomic load operation:
7867 return the atomically loaded value in MEM.
7869 MEMMODEL is the memory model variant to use.
7870 TARGET is an option place to stick the return value. */
7873 expand_atomic_load (rtx target, rtx mem, enum memmodel model)
7875 enum machine_mode mode = GET_MODE (mem);
7876 enum insn_code icode;
7878 /* If the target supports the load directly, great. */
7879 icode = direct_optab_handler (atomic_load_optab, mode);
7880 if (icode != CODE_FOR_nothing)
7882 struct expand_operand ops[3];
7884 create_output_operand (&ops[0], target, mode);
7885 create_fixed_operand (&ops[1], mem);
7886 create_integer_operand (&ops[2], model);
7887 if (maybe_expand_insn (icode, 3, ops))
7888 return ops[0].value;
7891 /* If the size of the object is greater than word size on this target,
7892 then we assume that a load will not be atomic. */
7893 if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
7895 /* Issue val = compare_and_swap (mem, 0, 0).
7896 This may cause the occasional harmless store of 0 when the value is
7897 already 0, but it seems to be OK according to the standards guys. */
7898 if (expand_atomic_compare_and_swap (NULL, &target, mem, const0_rtx,
7899 const0_rtx, false, model, model))
7900 return target;
7901 else
7902 /* Otherwise there is no atomic load, leave the library call. */
7903 return NULL_RTX;
7906 /* Otherwise assume loads are atomic, and emit the proper barriers. */
7907 if (!target || target == const0_rtx)
7908 target = gen_reg_rtx (mode);
7910 /* Emit the appropriate barrier before the load. */
7911 expand_mem_thread_fence (model);
7913 emit_move_insn (target, mem);
7915 /* For SEQ_CST, also emit a barrier after the load. */
7916 if (model == MEMMODEL_SEQ_CST)
7917 expand_mem_thread_fence (model);
7919 return target;
7922 /* This function expands the atomic store operation:
7923 Atomically store VAL in MEM.
7924 MEMMODEL is the memory model variant to use.
7925 USE_RELEASE is true if __sync_lock_release can be used as a fall back.
7926 function returns const0_rtx if a pattern was emitted. */
7929 expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
7931 enum machine_mode mode = GET_MODE (mem);
7932 enum insn_code icode;
7933 struct expand_operand ops[3];
7935 /* If the target supports the store directly, great. */
7936 icode = direct_optab_handler (atomic_store_optab, mode);
7937 if (icode != CODE_FOR_nothing)
7939 create_fixed_operand (&ops[0], mem);
7940 create_input_operand (&ops[1], val, mode);
7941 create_integer_operand (&ops[2], model);
7942 if (maybe_expand_insn (icode, 3, ops))
7943 return const0_rtx;
7946 /* If using __sync_lock_release is a viable alternative, try it. */
7947 if (use_release)
7949 icode = direct_optab_handler (sync_lock_release_optab, mode);
7950 if (icode != CODE_FOR_nothing)
7952 create_fixed_operand (&ops[0], mem);
7953 create_input_operand (&ops[1], const0_rtx, mode);
7954 if (maybe_expand_insn (icode, 2, ops))
7956 /* lock_release is only a release barrier. */
7957 if (model == MEMMODEL_SEQ_CST)
7958 expand_mem_thread_fence (model);
7959 return const0_rtx;
7964 /* If the size of the object is greater than word size on this target,
7965 a default store will not be atomic, Try a mem_exchange and throw away
7966 the result. If that doesn't work, don't do anything. */
7967 if (GET_MODE_PRECISION(mode) > BITS_PER_WORD)
7969 rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
7970 if (!target)
7971 target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val);
7972 if (target)
7973 return const0_rtx;
7974 else
7975 return NULL_RTX;
7978 /* If there is no mem_store, default to a move with barriers */
7979 if (model == MEMMODEL_SEQ_CST || model == MEMMODEL_RELEASE)
7980 expand_mem_thread_fence (model);
7982 emit_move_insn (mem, val);
7984 /* For SEQ_CST, also emit a barrier after the load. */
7985 if (model == MEMMODEL_SEQ_CST)
7986 expand_mem_thread_fence (model);
7988 return const0_rtx;
7992 /* Structure containing the pointers and values required to process the
7993 various forms of the atomic_fetch_op and atomic_op_fetch builtins. */
7995 struct atomic_op_functions
/* __atomic_fetch_OP pattern (returns the value before the operation).  */
7997 direct_optab mem_fetch_before;
/* __atomic_OP_fetch pattern (returns the value after the operation).  */
7998 direct_optab mem_fetch_after;
/* __atomic_OP pattern used when the result is unused.  */
7999 direct_optab mem_no_result;
/* Legacy __sync_old_OP / __sync_new_OP patterns (no memory-model
   operand).  */
8000 optab fetch_before;
8001 optab fetch_after;
/* Legacy __sync_OP pattern used when the result is unused.  */
8002 direct_optab no_result;
/* Code that recovers one fetch variant from the other (e.g. MINUS
   undoes PLUS); UNKNOWN when no such reversal exists -- see
   get_atomic_op_for_code and the compensation code in
   expand_atomic_fetch_op.  */
8003 enum rtx_code reverse_code;
8007 /* Fill in structure pointed to by OP with the various optab entries for an
8008 operation of type CODE. */
8010 static void
8011 get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
8013 gcc_assert (op!= NULL);
8015 /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
8016 in the source code during compilation, and the optab entries are not
8017 computable until runtime. Fill in the values at runtime. */
8018 switch (code)
8020 case PLUS:
8021 op->mem_fetch_before = atomic_fetch_add_optab;
8022 op->mem_fetch_after = atomic_add_fetch_optab;
8023 op->mem_no_result = atomic_add_optab;
8024 op->fetch_before = sync_old_add_optab;
8025 op->fetch_after = sync_new_add_optab;
8026 op->no_result = sync_add_optab;
8027 op->reverse_code = MINUS;
8028 break;
8029 case MINUS:
8030 op->mem_fetch_before = atomic_fetch_sub_optab;
8031 op->mem_fetch_after = atomic_sub_fetch_optab;
8032 op->mem_no_result = atomic_sub_optab;
8033 op->fetch_before = sync_old_sub_optab;
8034 op->fetch_after = sync_new_sub_optab;
8035 op->no_result = sync_sub_optab;
8036 op->reverse_code = PLUS;
8037 break;
8038 case XOR:
8039 op->mem_fetch_before = atomic_fetch_xor_optab;
8040 op->mem_fetch_after = atomic_xor_fetch_optab;
8041 op->mem_no_result = atomic_xor_optab;
8042 op->fetch_before = sync_old_xor_optab;
8043 op->fetch_after = sync_new_xor_optab;
8044 op->no_result = sync_xor_optab;
8045 op->reverse_code = XOR;
8046 break;
8047 case AND:
8048 op->mem_fetch_before = atomic_fetch_and_optab;
8049 op->mem_fetch_after = atomic_and_fetch_optab;
8050 op->mem_no_result = atomic_and_optab;
8051 op->fetch_before = sync_old_and_optab;
8052 op->fetch_after = sync_new_and_optab;
8053 op->no_result = sync_and_optab;
8054 op->reverse_code = UNKNOWN;
8055 break;
8056 case IOR:
8057 op->mem_fetch_before = atomic_fetch_or_optab;
8058 op->mem_fetch_after = atomic_or_fetch_optab;
8059 op->mem_no_result = atomic_or_optab;
8060 op->fetch_before = sync_old_ior_optab;
8061 op->fetch_after = sync_new_ior_optab;
8062 op->no_result = sync_ior_optab;
8063 op->reverse_code = UNKNOWN;
8064 break;
8065 case NOT:
8066 op->mem_fetch_before = atomic_fetch_nand_optab;
8067 op->mem_fetch_after = atomic_nand_fetch_optab;
8068 op->mem_no_result = atomic_nand_optab;
8069 op->fetch_before = sync_old_nand_optab;
8070 op->fetch_after = sync_new_nand_optab;
8071 op->no_result = sync_nand_optab;
8072 op->reverse_code = UNKNOWN;
8073 break;
8074 default:
8075 gcc_unreachable ();
8079 /* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
8080 using memory order MODEL. If AFTER is true the operation needs to return
8081 the value of *MEM after the operation, otherwise the previous value.
8082 TARGET is an optional place to place the result. The result is unused if
8083 it is const0_rtx.
8084 Return the result if there is a better sequence, otherwise NULL_RTX. */
8086 static rtx
8087 maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
8088 enum memmodel model, bool after)
8090 /* If the value is prefetched, or not used, it may be possible to replace
8091 the sequence with a native exchange operation. */
8092 if (!after || target == const0_rtx)
8094 /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */
8095 if (code == AND && val == const0_rtx)
8097 if (target == const0_rtx)
8098 target = gen_reg_rtx (GET_MODE (mem));
8099 return maybe_emit_atomic_exchange (target, mem, val, model);
8102 /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */
8103 if (code == IOR && val == constm1_rtx)
8105 if (target == const0_rtx)
8106 target = gen_reg_rtx (GET_MODE (mem));
8107 return maybe_emit_atomic_exchange (target, mem, val, model);
8111 return NULL_RTX;
8114 /* Try to emit an instruction for a specific operation variation.
8115 OPTAB contains the OP functions.
8116 TARGET is an optional place to return the result. const0_rtx means unused.
8117 MEM is the memory location to operate on.
8118 VAL is the value to use in the operation.
8119 USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
8120 MODEL is the memory model, if used.
8121 AFTER is true if the returned result is the value after the operation. */
8123 static rtx
8124 maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
8125 rtx val, bool use_memmodel, enum memmodel model, bool after)
8127 enum machine_mode mode = GET_MODE (mem);
8128 struct expand_operand ops[4];
8129 enum insn_code icode;
/* Index of the next positional operand to fill; the memory-model
   operand is placed at its fixed trailing slot separately.  */
8130 int op_counter = 0;
8131 int num_ops;
8133 /* Check to see if there is a result returned. */
8134 if (target == const0_rtx)
/* No-result forms: operands are (mem, val[, model]).  */
8136 if (use_memmodel)
8138 icode = direct_optab_handler (optab->mem_no_result, mode);
8139 create_integer_operand (&ops[2], model);
8140 num_ops = 3;
8142 else
8144 icode = direct_optab_handler (optab->no_result, mode);
8145 num_ops = 2;
8148 /* Otherwise, we need to generate a result. */
8149 else
/* Fetch forms: operands are (result, mem, val[, model]).  */
8151 if (use_memmodel)
8153 icode = direct_optab_handler (after ? optab->mem_fetch_after
8154 : optab->mem_fetch_before, mode);
8155 create_integer_operand (&ops[3], model);
8156 num_ops = 4;
8158 else
8160 icode = optab_handler (after ? optab->fetch_after
8161 : optab->fetch_before, mode);
8162 num_ops = 3;
8164 create_output_operand (&ops[op_counter++], target, mode);
8166 if (icode == CODE_FOR_nothing)
8167 return NULL_RTX;
8169 create_fixed_operand (&ops[op_counter++], mem);
8170 /* VAL may have been promoted to a wider mode. Shrink it if so. */
8171 create_convert_operand_to (&ops[op_counter++], val, mode, true);
8173 if (maybe_expand_insn (icode, num_ops, ops))
8174 return (target == const0_rtx ? const0_rtx : ops[0].value);
8176 return NULL_RTX;
8180 /* This function expands an atomic fetch_OP or OP_fetch operation:
8181 TARGET is an optional place to stick the return value. const0_rtx indicates
8182 the result is unused.
8183 atomically fetch MEM, perform the operation with VAL and return it to MEM.
8184 CODE is the operation being performed (OP)
8185 MEMMODEL is the memory model variant to use.
8186 AFTER is true to return the result of the operation (OP_fetch).
8187 AFTER is false to return the value before the operation (fetch_OP). */
/* NOTE(review): the return-type line is not visible in this listing;
   presumably rtx (NULL_RTX on failure) -- confirm against optabs.h.  */
8189 expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
8190 enum memmodel model, bool after)
8192 enum machine_mode mode = GET_MODE (mem);
8193 struct atomic_op_functions optab;
8194 rtx result;
8195 bool unused_result = (target == const0_rtx);
8197 get_atomic_op_for_code (&optab, code);
8199 /* Check to see if there are any better instructions. */
8200 result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
8201 if (result)
8202 return result;
8204 /* Check for the case where the result isn't used and try those patterns. */
8205 if (unused_result)
8207 /* Try the memory model variant first. */
8208 result = maybe_emit_op (&optab, target, mem, val, true, model, true);
8209 if (result)
8210 return result;
8212 /* Next try the old style without a memory model. */
8213 result = maybe_emit_op (&optab, target, mem, val, false, model, true);
8214 if (result)
8215 return result;
8217 /* There is no no-result pattern, so try patterns with a result. */
8218 target = NULL_RTX;
8221 /* Try the __atomic version. */
8222 result = maybe_emit_op (&optab, target, mem, val, true, model, after);
8223 if (result)
8224 return result;
8226 /* Try the older __sync version. */
8227 result = maybe_emit_op (&optab, target, mem, val, false, model, after);
8228 if (result)
8229 return result;
8231 /* If the fetch value can be calculated from the other variation of fetch,
8232 try that operation. */
/* This is legal when the result is unused, when the "after" value is
   wanted (always derivable by redoing OP), or when reverse_code can
   undo OP to recover the "before" value.  */
8233 if (after || unused_result || optab.reverse_code != UNKNOWN)
8235 /* Try the __atomic version, then the older __sync version. */
8236 result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
8237 if (!result)
8238 result = maybe_emit_op (&optab, target, mem, val, false, model, !after);
8240 if (result)
8242 /* If the result isn't used, no need to do compensation code. */
8243 if (unused_result)
8244 return result;
8246 /* Issue compensation code. Fetch_after == fetch_before OP val.
8247 Fetch_before == after REVERSE_OP val. */
8248 if (!after)
8249 code = optab.reverse_code;
/* NOT here denotes NAND: after = ~(before & val), so the "after" value
   is computed as NOT (AND (result, val)).  */
8250 if (code == NOT)
8252 result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
8253 true, OPTAB_LIB_WIDEN);
8254 result = expand_simple_unop (mode, NOT, result, target, true);
8256 else
8257 result = expand_simple_binop (mode, code, result, val, target,
8258 true, OPTAB_LIB_WIDEN);
8259 return result;
8263 /* Try the __sync libcalls only if we can't do compare-and-swap inline. */
8264 if (!can_compare_and_swap_p (mode, false))
8266 rtx libfunc;
8267 bool fixup = false;
8269 libfunc = optab_libfunc (after ? optab.fetch_after
8270 : optab.fetch_before, mode);
/* If the requested variant has no libcall, fall back to the other
   variant and remember to compensate afterwards.  */
8271 if (libfunc == NULL
8272 && (after || unused_result || optab.reverse_code != UNKNOWN))
8274 fixup = true;
8275 if (!after)
8276 code = optab.reverse_code;
8277 libfunc = optab_libfunc (after ? optab.fetch_before
8278 : optab.fetch_after, mode);
8280 if (libfunc != NULL)
8282 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
8283 result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
8284 2, addr, ptr_mode, val, mode);
8286 if (!unused_result && fixup)
8287 result = expand_simple_binop (mode, code, result, val, target,
8288 true, OPTAB_LIB_WIDEN);
8289 return result;
8293 /* If nothing else has succeeded, default to a compare and swap loop. */
8294 if (can_compare_and_swap_p (mode, true))
8296 rtx insn;
8297 rtx t0 = gen_reg_rtx (mode), t1;
/* Build the loop body (OP applied to the loaded value T0) in a
   sequence; the loop emitter fills in T0 and retries the CAS.  */
8299 start_sequence ();
8301 /* If the result is used, get a register for it. */
8302 if (!unused_result)
8304 if (!target || !register_operand (target, mode))
8305 target = gen_reg_rtx (mode);
8306 /* If fetch_before, copy the value now. */
8307 if (!after)
8308 emit_move_insn (target, t0);
8310 else
8311 target = const0_rtx;
8313 t1 = t0;
/* NOT here denotes NAND: new = ~(old & val).  */
8314 if (code == NOT)
8316 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
8317 true, OPTAB_LIB_WIDEN);
8318 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
8320 else
8321 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
8322 OPTAB_LIB_WIDEN);
8324 /* For after, copy the value now. */
8325 if (!unused_result && after)
8326 emit_move_insn (target, t1);
8327 insn = get_insns ();
8328 end_sequence ();
8330 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
8331 return target;
8334 return NULL_RTX;
8337 /* Return true if OPERAND is suitable for operand number OPNO of
8338 instruction ICODE. */
8340 bool
8341 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
8343 return (!insn_data[(int) icode].operand[opno].predicate
8344 || (insn_data[(int) icode].operand[opno].predicate
8345 (operand, insn_data[(int) icode].operand[opno].mode)));
8348 /* TARGET is a target of a multiword operation that we are going to
8349 implement as a series of word-mode operations. Return true if
8350 TARGET is suitable for this purpose. */
8352 bool
8353 valid_multiword_target_p (rtx target)
8355 enum machine_mode mode;
8356 int i;
8358 mode = GET_MODE (target);
8359 for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
8360 if (!validate_subreg (word_mode, mode, target, i))
8361 return false;
8362 return true;
8365 /* Like maybe_legitimize_operand, but do not change the code of the
8366 current rtx value. */
8368 static bool
8369 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
8370 struct expand_operand *op)
8372 /* See if the operand matches in its current form. */
8373 if (insn_operand_matches (icode, opno, op->value))
8374 return true;
8376 /* If the operand is a memory whose address has no side effects,
8377 try forcing the address into a non-virtual pseudo register.
8378 The check for side effects is important because copy_to_mode_reg
8379 cannot handle things like auto-modified addresses. */
8380 if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
8382 rtx addr, mem;
8384 mem = op->value;
8385 addr = XEXP (mem, 0);
8386 if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
8387 && !side_effects_p (addr))
8389 rtx last;
8390 enum machine_mode mode;
8392 last = get_last_insn ();
8393 mode = get_address_mode (mem);
8394 mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
8395 if (insn_operand_matches (icode, opno, mem))
8397 op->value = mem;
8398 return true;
8400 delete_insns_since (last);
8404 return false;
8407 /* Try to make OP match operand OPNO of instruction ICODE. Return true
8408 on success, storing the new operand value back in OP. */
8410 static bool
8411 maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
8412 struct expand_operand *op)
8414 enum machine_mode mode, imode;
8415 bool old_volatile_ok, result;
8417 mode = op->mode;
8418 switch (op->type)
8420 case EXPAND_FIXED:
8421 old_volatile_ok = volatile_ok;
8422 volatile_ok = true;
8423 result = maybe_legitimize_operand_same_code (icode, opno, op);
8424 volatile_ok = old_volatile_ok;
8425 return result;
8427 case EXPAND_OUTPUT:
8428 gcc_assert (mode != VOIDmode);
8429 if (op->value
8430 && op->value != const0_rtx
8431 && GET_MODE (op->value) == mode
8432 && maybe_legitimize_operand_same_code (icode, opno, op))
8433 return true;
8435 op->value = gen_reg_rtx (mode);
8436 break;
8438 case EXPAND_INPUT:
8439 input:
8440 gcc_assert (mode != VOIDmode);
8441 gcc_assert (GET_MODE (op->value) == VOIDmode
8442 || GET_MODE (op->value) == mode);
8443 if (maybe_legitimize_operand_same_code (icode, opno, op))
8444 return true;
8446 op->value = copy_to_mode_reg (mode, op->value);
8447 break;
8449 case EXPAND_CONVERT_TO:
8450 gcc_assert (mode != VOIDmode);
8451 op->value = convert_to_mode (mode, op->value, op->unsigned_p);
8452 goto input;
8454 case EXPAND_CONVERT_FROM:
8455 if (GET_MODE (op->value) != VOIDmode)
8456 mode = GET_MODE (op->value);
8457 else
8458 /* The caller must tell us what mode this value has. */
8459 gcc_assert (mode != VOIDmode);
8461 imode = insn_data[(int) icode].operand[opno].mode;
8462 if (imode != VOIDmode && imode != mode)
8464 op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
8465 mode = imode;
8467 goto input;
8469 case EXPAND_ADDRESS:
8470 gcc_assert (mode != VOIDmode);
8471 op->value = convert_memory_address (mode, op->value);
8472 goto input;
8474 case EXPAND_INTEGER:
8475 mode = insn_data[(int) icode].operand[opno].mode;
8476 if (mode != VOIDmode && const_int_operand (op->value, mode))
8477 goto input;
8478 break;
8480 return insn_operand_matches (icode, opno, op->value);
8483 /* Make OP describe an input operand that should have the same value
8484 as VALUE, after any mode conversion that the target might request.
8485 TYPE is the type of VALUE. */
8487 void
8488 create_convert_operand_from_type (struct expand_operand *op,
8489 rtx value, tree type)
8491 create_convert_operand_from (op, value, TYPE_MODE (type),
8492 TYPE_UNSIGNED (type));
8495 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
8496 of instruction ICODE. Return true on success, leaving the new operand
8497 values in the OPS themselves. Emit no code on failure. */
8499 bool
8500 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
8501 unsigned int nops, struct expand_operand *ops)
8503 rtx last;
8504 unsigned int i;
8506 last = get_last_insn ();
8507 for (i = 0; i < nops; i++)
8508 if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
8510 delete_insns_since (last);
8511 return false;
8513 return true;
8516 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
8517 as its operands. Return the instruction pattern on success,
8518 and emit any necessary set-up code. Return null and emit no
8519 code on failure. */
8522 maybe_gen_insn (enum insn_code icode, unsigned int nops,
8523 struct expand_operand *ops)
8525 gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
8526 if (!maybe_legitimize_operands (icode, 0, nops, ops))
8527 return NULL_RTX;
8529 switch (nops)
8531 case 1:
8532 return GEN_FCN (icode) (ops[0].value);
8533 case 2:
8534 return GEN_FCN (icode) (ops[0].value, ops[1].value);
8535 case 3:
8536 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
8537 case 4:
8538 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8539 ops[3].value);
8540 case 5:
8541 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8542 ops[3].value, ops[4].value);
8543 case 6:
8544 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8545 ops[3].value, ops[4].value, ops[5].value);
8546 case 7:
8547 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8548 ops[3].value, ops[4].value, ops[5].value,
8549 ops[6].value);
8550 case 8:
8551 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8552 ops[3].value, ops[4].value, ops[5].value,
8553 ops[6].value, ops[7].value);
8555 gcc_unreachable ();
8558 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
8559 as its operands. Return true on success and emit no code on failure. */
8561 bool
8562 maybe_expand_insn (enum insn_code icode, unsigned int nops,
8563 struct expand_operand *ops)
8565 rtx pat = maybe_gen_insn (icode, nops, ops);
8566 if (pat)
8568 emit_insn (pat);
8569 return true;
8571 return false;
8574 /* Like maybe_expand_insn, but for jumps. */
8576 bool
8577 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
8578 struct expand_operand *ops)
8580 rtx pat = maybe_gen_insn (icode, nops, ops);
8581 if (pat)
8583 emit_jump_insn (pat);
8584 return true;
8586 return false;
8589 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
8590 as its operands. */
8592 void
8593 expand_insn (enum insn_code icode, unsigned int nops,
8594 struct expand_operand *ops)
8596 if (!maybe_expand_insn (icode, nops, ops))
8597 gcc_unreachable ();
8600 /* Like expand_insn, but for jumps. */
8602 void
8603 expand_jump_insn (enum insn_code icode, unsigned int nops,
8604 struct expand_operand *ops)
8606 if (!maybe_expand_jump_insn (icode, nops, ops))
8607 gcc_unreachable ();
8610 #include "gt-optabs.h"