Fix gnat.dg/opt39.adb on hppa.
[official-gcc.git] / gcc / optabs.cc
blobc8e39c82d57a7d726e7da33d247b80f32ec9236c
1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987-2023 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "memmodel.h"
29 #include "predict.h"
30 #include "tm_p.h"
31 #include "optabs.h"
32 #include "expmed.h"
33 #include "emit-rtl.h"
34 #include "recog.h"
35 #include "diagnostic-core.h"
36 #include "rtx-vector-builder.h"
38 /* Include insn-config.h before expr.h so that HAVE_conditional_move
39 is properly defined. */
40 #include "stor-layout.h"
41 #include "except.h"
42 #include "dojump.h"
43 #include "explow.h"
44 #include "expr.h"
45 #include "optabs-tree.h"
46 #include "libfuncs.h"
47 #include "internal-fn.h"
48 #include "langhooks.h"
49 #include "gimple.h"
50 #include "ssa.h"
52 static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
53 machine_mode *);
54 static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
55 static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);
57 static rtx emit_conditional_move_1 (rtx, rtx, rtx, rtx, machine_mode);
59 /* Debug facility for use in GDB. */
60 void debug_optab_libfuncs (void);
62 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
63 the result of operation CODE applied to OP0 (and OP1 if it is a binary
64 operation). OP0_MODE is OP0's mode.
66 If the last insn does not set TARGET, don't do anything, but return 1.
68 If the last insn or a previous insn sets TARGET and TARGET is one of OP0
69 or OP1, don't add the REG_EQUAL note but return 0. Our caller can then
70 try again, ensuring that TARGET is not one of the operands. */
72 static int
73 add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
74 rtx op1, machine_mode op0_mode)
76 rtx_insn *last_insn;
77 rtx set;
78 rtx note;
80 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
82 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
83 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
84 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
85 && GET_RTX_CLASS (code) != RTX_COMPARE
86 && GET_RTX_CLASS (code) != RTX_UNARY)
87 return 1;
89 if (GET_CODE (target) == ZERO_EXTRACT)
90 return 1;
92 for (last_insn = insns;
93 NEXT_INSN (last_insn) != NULL_RTX;
94 last_insn = NEXT_INSN (last_insn))
97 /* If TARGET is in OP0 or OP1, punt. We'd end up with a note referencing
98 a value changing in the insn, so the note would be invalid for CSE. */
99 if (reg_overlap_mentioned_p (target, op0)
100 || (op1 && reg_overlap_mentioned_p (target, op1)))
102 if (MEM_P (target)
103 && (rtx_equal_p (target, op0)
104 || (op1 && rtx_equal_p (target, op1))))
106 /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
107 over expanding it as temp = MEM op X, MEM = temp. If the target
108 supports MEM = MEM op X instructions, it is sometimes too hard
109 to reconstruct that form later, especially if X is also a memory,
110 and due to multiple occurrences of addresses the address might
111 be forced into register unnecessarily.
112 Note that not emitting the REG_EQUIV note might inhibit
113 CSE in some cases. */
114 set = single_set (last_insn);
115 if (set
116 && GET_CODE (SET_SRC (set)) == code
117 && MEM_P (SET_DEST (set))
118 && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
119 || (op1 && rtx_equal_p (SET_DEST (set),
120 XEXP (SET_SRC (set), 1)))))
121 return 1;
123 return 0;
126 set = set_for_reg_notes (last_insn);
127 if (set == NULL_RTX)
128 return 1;
130 if (! rtx_equal_p (SET_DEST (set), target)
131 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
132 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
133 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
134 return 1;
136 if (GET_RTX_CLASS (code) == RTX_UNARY)
137 switch (code)
139 case FFS:
140 case CLZ:
141 case CTZ:
142 case CLRSB:
143 case POPCOUNT:
144 case PARITY:
145 case BSWAP:
146 if (op0_mode != VOIDmode && GET_MODE (target) != op0_mode)
148 note = gen_rtx_fmt_e (code, op0_mode, copy_rtx (op0));
149 if (GET_MODE_UNIT_SIZE (op0_mode)
150 > GET_MODE_UNIT_SIZE (GET_MODE (target)))
151 note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
152 note, op0_mode);
153 else
154 note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
155 note, op0_mode);
156 break;
158 /* FALLTHRU */
159 default:
160 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
161 break;
163 else
164 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
166 set_unique_reg_note (last_insn, REG_EQUAL, note);
168 return 1;
171 /* Given two input operands, OP0 and OP1, determine what the correct from_mode
172 for a widening operation would be. In most cases this would be OP0, but if
173 that's a constant it'll be VOIDmode, which isn't useful. */
175 static machine_mode
176 widened_mode (machine_mode to_mode, rtx op0, rtx op1)
178 machine_mode m0 = GET_MODE (op0);
179 machine_mode m1 = GET_MODE (op1);
180 machine_mode result;
182 if (m0 == VOIDmode && m1 == VOIDmode)
183 return to_mode;
184 else if (m0 == VOIDmode || GET_MODE_UNIT_SIZE (m0) < GET_MODE_UNIT_SIZE (m1))
185 result = m1;
186 else
187 result = m0;
189 if (GET_MODE_UNIT_SIZE (result) > GET_MODE_UNIT_SIZE (to_mode))
190 return to_mode;
192 return result;
195 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
196 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
197 not actually do a sign-extend or zero-extend, but can leave the
198 higher-order bits of the result rtx undefined, for example, in the case
199 of logical operations, but not right shifts. */
201 static rtx
202 widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
203 int unsignedp, int no_extend)
205 rtx result;
206 scalar_int_mode int_mode;
208 /* If we don't have to extend and this is a constant, return it. */
209 if (no_extend && GET_MODE (op) == VOIDmode)
210 return op;
212 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
213 extend since it will be more efficient to do so unless the signedness of
214 a promoted object differs from our extension. */
215 if (! no_extend
216 || !is_a <scalar_int_mode> (mode, &int_mode)
217 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
218 && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
219 return convert_modes (mode, oldmode, op, unsignedp);
221 /* If MODE is no wider than a single word, we return a lowpart or paradoxical
222 SUBREG. */
223 if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
224 return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));
226 /* Otherwise, get an object of MODE, clobber it, and set the low-order
227 part to OP. */
229 result = gen_reg_rtx (int_mode);
230 emit_clobber (result);
231 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
232 return result;
235 /* Expand vector widening operations.
237 There are two different classes of operations handled here:
238 1) Operations whose result is wider than all the arguments to the operation.
239 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
240 In this case OP0 and optionally OP1 would be initialized,
241 but WIDE_OP wouldn't (not relevant for this case).
242 2) Operations whose result is of the same size as the last argument to the
243 operation, but wider than all the other arguments to the operation.
244 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
245 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
247 E.g, when called to expand the following operations, this is how
248 the arguments will be initialized:
249 nops OP0 OP1 WIDE_OP
250 widening-sum 2 oprnd0 - oprnd1
251 widening-dot-product 3 oprnd0 oprnd1 oprnd2
252 widening-mult 2 oprnd0 oprnd1 -
253 type-promotion (vec-unpack) 1 oprnd0 - - */
256 expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
257 rtx target, int unsignedp)
259 class expand_operand eops[4];
260 tree oprnd0, oprnd1, oprnd2;
261 machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
262 optab widen_pattern_optab;
263 enum insn_code icode;
264 int nops = TREE_CODE_LENGTH (ops->code);
265 int op;
266 bool sbool = false;
268 oprnd0 = ops->op0;
269 oprnd1 = nops >= 2 ? ops->op1 : NULL_TREE;
270 oprnd2 = nops >= 3 ? ops->op2 : NULL_TREE;
272 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
273 if (ops->code == VEC_UNPACK_FIX_TRUNC_HI_EXPR
274 || ops->code == VEC_UNPACK_FIX_TRUNC_LO_EXPR)
275 /* The sign is from the result type rather than operand's type
276 for these ops. */
277 widen_pattern_optab
278 = optab_for_tree_code (ops->code, ops->type, optab_default);
279 else if ((ops->code == VEC_UNPACK_HI_EXPR
280 || ops->code == VEC_UNPACK_LO_EXPR)
281 && VECTOR_BOOLEAN_TYPE_P (ops->type)
282 && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (oprnd0))
283 && TYPE_MODE (ops->type) == TYPE_MODE (TREE_TYPE (oprnd0))
284 && SCALAR_INT_MODE_P (TYPE_MODE (ops->type)))
286 /* For VEC_UNPACK_{LO,HI}_EXPR if the mode of op0 and result is
287 the same scalar mode for VECTOR_BOOLEAN_TYPE_P vectors, use
288 vec_unpacks_sbool_{lo,hi}_optab, so that we can pass in
289 the pattern number of elements in the wider vector. */
290 widen_pattern_optab
291 = (ops->code == VEC_UNPACK_HI_EXPR
292 ? vec_unpacks_sbool_hi_optab : vec_unpacks_sbool_lo_optab);
293 sbool = true;
295 else if (ops->code == DOT_PROD_EXPR)
297 enum optab_subtype subtype = optab_default;
298 signop sign1 = TYPE_SIGN (TREE_TYPE (oprnd0));
299 signop sign2 = TYPE_SIGN (TREE_TYPE (oprnd1));
300 if (sign1 == sign2)
302 else if (sign1 == SIGNED && sign2 == UNSIGNED)
304 subtype = optab_vector_mixed_sign;
305 /* Same as optab_vector_mixed_sign but flip the operands. */
306 std::swap (op0, op1);
308 else if (sign1 == UNSIGNED && sign2 == SIGNED)
309 subtype = optab_vector_mixed_sign;
310 else
311 gcc_unreachable ();
313 widen_pattern_optab
314 = optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), subtype);
316 else
317 widen_pattern_optab
318 = optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
319 if (ops->code == WIDEN_MULT_PLUS_EXPR
320 || ops->code == WIDEN_MULT_MINUS_EXPR)
321 icode = find_widening_optab_handler (widen_pattern_optab,
322 TYPE_MODE (TREE_TYPE (ops->op2)),
323 tmode0);
324 else
325 icode = optab_handler (widen_pattern_optab, tmode0);
326 gcc_assert (icode != CODE_FOR_nothing);
328 if (nops >= 2)
329 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
330 else if (sbool)
332 nops = 2;
333 op1 = GEN_INT (TYPE_VECTOR_SUBPARTS (TREE_TYPE (oprnd0)).to_constant ());
334 tmode1 = tmode0;
337 /* The last operand is of a wider mode than the rest of the operands. */
338 if (nops == 2)
339 wmode = tmode1;
340 else if (nops == 3)
342 gcc_assert (tmode1 == tmode0);
343 gcc_assert (op1);
344 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
347 op = 0;
348 create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
349 create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
350 if (op1)
351 create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
352 if (wide_op)
353 create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
354 expand_insn (icode, op, eops);
355 return eops[0].value;
358 /* Generate code to perform an operation specified by TERNARY_OPTAB
359 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
361 UNSIGNEDP is for the case where we have to widen the operands
362 to perform the operation. It says to use zero-extension.
364 If TARGET is nonzero, the value
365 is generated there, if it is convenient to do so.
366 In all cases an rtx is returned for the locus of the value;
367 this may or may not be TARGET. */
370 expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
371 rtx op1, rtx op2, rtx target, int unsignedp)
373 class expand_operand ops[4];
374 enum insn_code icode = optab_handler (ternary_optab, mode);
376 gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);
378 create_output_operand (&ops[0], target, mode);
379 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
380 create_convert_operand_from (&ops[2], op1, mode, unsignedp);
381 create_convert_operand_from (&ops[3], op2, mode, unsignedp);
382 expand_insn (icode, 4, ops);
383 return ops[0].value;
387 /* Like expand_binop, but return a constant rtx if the result can be
388 calculated at compile time. The arguments and return value are
389 otherwise the same as for expand_binop. */
392 simplify_expand_binop (machine_mode mode, optab binoptab,
393 rtx op0, rtx op1, rtx target, int unsignedp,
394 enum optab_methods methods)
396 if (CONSTANT_P (op0) && CONSTANT_P (op1))
398 rtx x = simplify_binary_operation (optab_to_code (binoptab),
399 mode, op0, op1);
400 if (x)
401 return x;
404 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
407 /* Like simplify_expand_binop, but always put the result in TARGET.
408 Return true if the expansion succeeded. */
410 bool
411 force_expand_binop (machine_mode mode, optab binoptab,
412 rtx op0, rtx op1, rtx target, int unsignedp,
413 enum optab_methods methods)
415 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
416 target, unsignedp, methods);
417 if (x == 0)
418 return false;
419 if (x != target)
420 emit_move_insn (target, x);
421 return true;
424 /* Create a new vector value in VMODE with all elements set to OP. The
425 mode of OP must be the element mode of VMODE. If OP is a constant,
426 then the return value will be a constant. */
429 expand_vector_broadcast (machine_mode vmode, rtx op)
431 int n;
432 rtvec vec;
434 gcc_checking_assert (VECTOR_MODE_P (vmode));
436 if (valid_for_const_vector_p (vmode, op))
437 return gen_const_vec_duplicate (vmode, op);
439 insn_code icode = optab_handler (vec_duplicate_optab, vmode);
440 if (icode != CODE_FOR_nothing)
442 class expand_operand ops[2];
443 create_output_operand (&ops[0], NULL_RTX, vmode);
444 create_input_operand (&ops[1], op, GET_MODE (op));
445 expand_insn (icode, 2, ops);
446 return ops[0].value;
449 if (!GET_MODE_NUNITS (vmode).is_constant (&n))
450 return NULL;
452 /* ??? If the target doesn't have a vec_init, then we have no easy way
453 of performing this operation. Most of this sort of generic support
454 is hidden away in the vector lowering support in gimple. */
455 icode = convert_optab_handler (vec_init_optab, vmode,
456 GET_MODE_INNER (vmode));
457 if (icode == CODE_FOR_nothing)
458 return NULL;
460 vec = rtvec_alloc (n);
461 for (int i = 0; i < n; ++i)
462 RTVEC_ELT (vec, i) = op;
463 rtx ret = gen_reg_rtx (vmode);
464 emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));
466 return ret;
469 /* This subroutine of expand_doubleword_shift handles the cases in which
470 the effective shift value is >= BITS_PER_WORD. The arguments and return
471 value are the same as for the parent routine, except that SUPERWORD_OP1
472 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
473 INTO_TARGET may be null if the caller has decided to calculate it. */
475 static bool
476 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
477 rtx outof_target, rtx into_target,
478 int unsignedp, enum optab_methods methods)
480 if (into_target != 0)
481 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
482 into_target, unsignedp, methods))
483 return false;
485 if (outof_target != 0)
487 /* For a signed right shift, we must fill OUTOF_TARGET with copies
488 of the sign bit, otherwise we must fill it with zeros. */
489 if (binoptab != ashr_optab)
490 emit_move_insn (outof_target, CONST0_RTX (word_mode));
491 else
492 if (!force_expand_binop (word_mode, binoptab, outof_input,
493 gen_int_shift_amount (word_mode,
494 BITS_PER_WORD - 1),
495 outof_target, unsignedp, methods))
496 return false;
498 return true;
501 /* This subroutine of expand_doubleword_shift handles the cases in which
502 the effective shift value is < BITS_PER_WORD. The arguments and return
503 value are the same as for the parent routine. */
505 static bool
506 expand_subword_shift (scalar_int_mode op1_mode, optab binoptab,
507 rtx outof_input, rtx into_input, rtx op1,
508 rtx outof_target, rtx into_target,
509 int unsignedp, enum optab_methods methods,
510 unsigned HOST_WIDE_INT shift_mask)
512 optab reverse_unsigned_shift, unsigned_shift;
513 rtx tmp, carries;
515 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
516 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
518 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
519 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
520 the opposite direction to BINOPTAB. */
521 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
523 carries = outof_input;
524 tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
525 op1_mode), op1_mode);
526 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
527 0, true, methods);
529 else
531 /* We must avoid shifting by BITS_PER_WORD bits since that is either
532 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
533 has unknown behavior. Do a single shift first, then shift by the
534 remainder. It's OK to use ~OP1 as the remainder if shift counts
535 are truncated to the mode size. */
536 carries = expand_binop (word_mode, reverse_unsigned_shift,
537 outof_input, const1_rtx, 0, unsignedp, methods);
538 if (shift_mask == BITS_PER_WORD - 1)
540 tmp = immed_wide_int_const
541 (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
542 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
543 0, true, methods);
545 else
547 tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
548 op1_mode), op1_mode);
549 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
550 0, true, methods);
553 if (tmp == 0 || carries == 0)
554 return false;
555 carries = expand_binop (word_mode, reverse_unsigned_shift,
556 carries, tmp, 0, unsignedp, methods);
557 if (carries == 0)
558 return false;
560 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
561 so the result can go directly into INTO_TARGET if convenient. */
562 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
563 into_target, unsignedp, methods);
564 if (tmp == 0)
565 return false;
567 /* Now OR in the bits carried over from OUTOF_INPUT. */
568 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
569 into_target, unsignedp, methods))
570 return false;
572 /* Use a standard word_mode shift for the out-of half. */
573 if (outof_target != 0)
574 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
575 outof_target, unsignedp, methods))
576 return false;
578 return true;
582 /* Try implementing expand_doubleword_shift using conditional moves.
583 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
584 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
585 are the shift counts to use in the former and latter case. All other
586 arguments are the same as the parent routine. */
588 static bool
589 expand_doubleword_shift_condmove (scalar_int_mode op1_mode, optab binoptab,
590 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
591 rtx outof_input, rtx into_input,
592 rtx subword_op1, rtx superword_op1,
593 rtx outof_target, rtx into_target,
594 int unsignedp, enum optab_methods methods,
595 unsigned HOST_WIDE_INT shift_mask)
597 rtx outof_superword, into_superword;
599 /* Put the superword version of the output into OUTOF_SUPERWORD and
600 INTO_SUPERWORD. */
601 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
602 if (outof_target != 0 && subword_op1 == superword_op1)
604 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
605 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
606 into_superword = outof_target;
607 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
608 outof_superword, 0, unsignedp, methods))
609 return false;
611 else
613 into_superword = gen_reg_rtx (word_mode);
614 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
615 outof_superword, into_superword,
616 unsignedp, methods))
617 return false;
620 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
621 if (!expand_subword_shift (op1_mode, binoptab,
622 outof_input, into_input, subword_op1,
623 outof_target, into_target,
624 unsignedp, methods, shift_mask))
625 return false;
627 /* Select between them. Do the INTO half first because INTO_SUPERWORD
628 might be the current value of OUTOF_TARGET. */
629 if (!emit_conditional_move (into_target, { cmp_code, cmp1, cmp2, op1_mode },
630 into_target, into_superword, word_mode, false))
631 return false;
633 if (outof_target != 0)
634 if (!emit_conditional_move (outof_target,
635 { cmp_code, cmp1, cmp2, op1_mode },
636 outof_target, outof_superword,
637 word_mode, false))
638 return false;
640 return true;
643 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
644 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
645 input operand; the shift moves bits in the direction OUTOF_INPUT->
646 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
647 of the target. OP1 is the shift count and OP1_MODE is its mode.
648 If OP1 is constant, it will have been truncated as appropriate
649 and is known to be nonzero.
651 If SHIFT_MASK is zero, the result of word shifts is undefined when the
652 shift count is outside the range [0, BITS_PER_WORD). This routine must
653 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
655 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
656 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
657 fill with zeros or sign bits as appropriate.
659 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
660 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
661 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
662 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
663 are undefined.
665 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
666 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
667 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
668 function wants to calculate it itself.
670 Return true if the shift could be successfully synthesized. */
672 static bool
673 expand_doubleword_shift (scalar_int_mode op1_mode, optab binoptab,
674 rtx outof_input, rtx into_input, rtx op1,
675 rtx outof_target, rtx into_target,
676 int unsignedp, enum optab_methods methods,
677 unsigned HOST_WIDE_INT shift_mask)
679 rtx superword_op1, tmp, cmp1, cmp2;
680 enum rtx_code cmp_code;
682 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
683 fill the result with sign or zero bits as appropriate. If so, the value
684 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
685 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
686 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
688 This isn't worthwhile for constant shifts since the optimizers will
689 cope better with in-range shift counts. */
690 if (shift_mask >= BITS_PER_WORD
691 && outof_target != 0
692 && !CONSTANT_P (op1))
694 if (!expand_doubleword_shift (op1_mode, binoptab,
695 outof_input, into_input, op1,
696 0, into_target,
697 unsignedp, methods, shift_mask))
698 return false;
699 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
700 outof_target, unsignedp, methods))
701 return false;
702 return true;
705 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
706 is true when the effective shift value is less than BITS_PER_WORD.
707 Set SUPERWORD_OP1 to the shift count that should be used to shift
708 OUTOF_INPUT into INTO_TARGET when the condition is false. */
709 tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
710 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
712 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
713 is a subword shift count. */
714 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
715 0, true, methods);
716 cmp2 = CONST0_RTX (op1_mode);
717 cmp_code = EQ;
718 superword_op1 = op1;
720 else
722 /* Set CMP1 to OP1 - BITS_PER_WORD. */
723 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
724 0, true, methods);
725 cmp2 = CONST0_RTX (op1_mode);
726 cmp_code = LT;
727 superword_op1 = cmp1;
729 if (cmp1 == 0)
730 return false;
732 /* If we can compute the condition at compile time, pick the
733 appropriate subroutine. */
734 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
735 if (tmp != 0 && CONST_INT_P (tmp))
737 if (tmp == const0_rtx)
738 return expand_superword_shift (binoptab, outof_input, superword_op1,
739 outof_target, into_target,
740 unsignedp, methods);
741 else
742 return expand_subword_shift (op1_mode, binoptab,
743 outof_input, into_input, op1,
744 outof_target, into_target,
745 unsignedp, methods, shift_mask);
748 /* Try using conditional moves to generate straight-line code. */
749 if (HAVE_conditional_move)
751 rtx_insn *start = get_last_insn ();
752 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
753 cmp_code, cmp1, cmp2,
754 outof_input, into_input,
755 op1, superword_op1,
756 outof_target, into_target,
757 unsignedp, methods, shift_mask))
758 return true;
759 delete_insns_since (start);
762 /* As a last resort, use branches to select the correct alternative. */
763 rtx_code_label *subword_label = gen_label_rtx ();
764 rtx_code_label *done_label = gen_label_rtx ();
766 NO_DEFER_POP;
767 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
768 0, 0, subword_label,
769 profile_probability::uninitialized ());
770 OK_DEFER_POP;
772 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
773 outof_target, into_target,
774 unsignedp, methods))
775 return false;
777 emit_jump_insn (targetm.gen_jump (done_label));
778 emit_barrier ();
779 emit_label (subword_label);
781 if (!expand_subword_shift (op1_mode, binoptab,
782 outof_input, into_input, op1,
783 outof_target, into_target,
784 unsignedp, methods, shift_mask))
785 return false;
787 emit_label (done_label);
788 return true;
791 /* Subroutine of expand_binop. Perform a double word multiplication of
792 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
793 as the target's word_mode. This function return NULL_RTX if anything
794 goes wrong, in which case it may have already emitted instructions
795 which need to be deleted.
797 If we want to multiply two two-word values and have normal and widening
798 multiplies of single-word values, we can do this with three smaller
799 multiplications.
801 The multiplication proceeds as follows:
802 _______________________
803 [__op0_high_|__op0_low__]
804 _______________________
805 * [__op1_high_|__op1_low__]
806 _______________________________________________
807 _______________________
808 (1) [__op0_low__*__op1_low__]
809 _______________________
810 (2a) [__op0_low__*__op1_high_]
811 _______________________
812 (2b) [__op0_high_*__op1_low__]
813 _______________________
814 (3) [__op0_high_*__op1_high_]
817 This gives a 4-word result. Since we are only interested in the
818 lower 2 words, partial result (3) and the upper words of (2a) and
819 (2b) don't need to be calculated. Hence (2a) and (2b) can be
820 calculated using non-widening multiplication.
822 (1), however, needs to be calculated with an unsigned widening
823 multiplication. If this operation is not directly supported we
824 try using a signed widening multiplication and adjust the result.
825 This adjustment works as follows:
827 If both operands are positive then no adjustment is needed.
829 If the operands have different signs, for example op0_low < 0 and
830 op1_low >= 0, the instruction treats the most significant bit of
831 op0_low as a sign bit instead of a bit with significance
832 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
833 with 2**BITS_PER_WORD - op0_low, and two's complements the
834 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
835 the result.
837 Similarly, if both operands are negative, we need to add
838 (op0_low + op1_low) * 2**BITS_PER_WORD.
840 We use a trick to adjust quickly. We logically shift op0_low right
841 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
842 op0_high (op1_high) before it is used to calculate 2b (2a). If no
843 logical shift exists, we do an arithmetic right shift and subtract
844 the 0 or -1. */
846 static rtx
847 expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
848 bool umulp, enum optab_methods methods)
850 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
851 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
852 rtx wordm1 = (umulp ? NULL_RTX
853 : gen_int_shift_amount (word_mode, BITS_PER_WORD - 1));
854 rtx product, adjust, product_high, temp;
856 rtx op0_high = operand_subword_force (op0, high, mode);
857 rtx op0_low = operand_subword_force (op0, low, mode);
858 rtx op1_high = operand_subword_force (op1, high, mode);
859 rtx op1_low = operand_subword_force (op1, low, mode);
861 /* If we're using an unsigned multiply to directly compute the product
862 of the low-order words of the operands and perform any required
863 adjustments of the operands, we begin by trying two more multiplications
864 and then computing the appropriate sum.
866 We have checked above that the required addition is provided.
867 Full-word addition will normally always succeed, especially if
868 it is provided at all, so we don't worry about its failure. The
869 multiplication may well fail, however, so we do handle that. */
871 if (!umulp)
873 /* ??? This could be done with emit_store_flag where available. */
874 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
875 NULL_RTX, 1, methods);
876 if (temp)
877 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
878 NULL_RTX, 0, OPTAB_DIRECT);
879 else
881 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
882 NULL_RTX, 0, methods);
883 if (!temp)
884 return NULL_RTX;
885 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
886 NULL_RTX, 0, OPTAB_DIRECT);
889 if (!op0_high)
890 return NULL_RTX;
893 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
894 NULL_RTX, 0, OPTAB_DIRECT);
895 if (!adjust)
896 return NULL_RTX;
898 /* OP0_HIGH should now be dead. */
900 if (!umulp)
902 /* ??? This could be done with emit_store_flag where available. */
903 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
904 NULL_RTX, 1, methods);
905 if (temp)
906 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
907 NULL_RTX, 0, OPTAB_DIRECT);
908 else
910 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
911 NULL_RTX, 0, methods);
912 if (!temp)
913 return NULL_RTX;
914 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
915 NULL_RTX, 0, OPTAB_DIRECT);
918 if (!op1_high)
919 return NULL_RTX;
922 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
923 NULL_RTX, 0, OPTAB_DIRECT);
924 if (!temp)
925 return NULL_RTX;
927 /* OP1_HIGH should now be dead. */
929 adjust = expand_binop (word_mode, add_optab, adjust, temp,
930 NULL_RTX, 0, OPTAB_DIRECT);
932 if (target && !REG_P (target))
933 target = NULL_RTX;
935 /* *_widen_optab needs to determine operand mode, make sure at least
936 one operand has non-VOID mode. */
937 if (GET_MODE (op0_low) == VOIDmode && GET_MODE (op1_low) == VOIDmode)
938 op0_low = force_reg (word_mode, op0_low);
940 if (umulp)
941 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
942 target, 1, OPTAB_DIRECT);
943 else
944 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
945 target, 1, OPTAB_DIRECT);
947 if (!product)
948 return NULL_RTX;
950 product_high = operand_subword (product, high, 1, mode);
951 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
952 NULL_RTX, 0, OPTAB_DIRECT);
953 emit_move_insn (product_high, adjust);
954 return product;
957 /* Subroutine of expand_binop. Optimize unsigned double-word OP0 % OP1 for
958 constant OP1. If for some bit in [BITS_PER_WORD / 2, BITS_PER_WORD] range
959 (prefer higher bits) ((1w << bit) % OP1) == 1, then the modulo can be
960 computed in word-mode as ((OP0 & (bit - 1)) + ((OP0 >> bit) & (bit - 1))
961 + (OP0 >> (2 * bit))) % OP1. Whether we need to sum 2, 3 or 4 values
962 depends on the bit value, if 2, then carry from the addition needs to be
963 added too, i.e. like:
964 sum += __builtin_add_overflow (low, high, &sum)
966 Optimize signed double-word OP0 % OP1 similarly, just apply some correction
967 factor to the sum before doing unsigned remainder, in the form of
968 sum += (((signed) OP0 >> (2 * BITS_PER_WORD - 1)) & const);
969 then perform unsigned
970 remainder = sum % OP1;
971 and finally
972 remainder += ((signed) OP0 >> (2 * BITS_PER_WORD - 1)) & (1 - OP1); */
974 static rtx
975 expand_doubleword_mod (machine_mode mode, rtx op0, rtx op1, bool unsignedp)
977 if (INTVAL (op1) <= 1 || (INTVAL (op1) & 1) == 0)
978 return NULL_RTX;
980 rtx_insn *last = get_last_insn ();
981 for (int bit = BITS_PER_WORD; bit >= BITS_PER_WORD / 2; bit--)
983 wide_int w = wi::shifted_mask (bit, 1, false, 2 * BITS_PER_WORD);
984 if (wi::ne_p (wi::umod_trunc (w, INTVAL (op1)), 1))
985 continue;
986 rtx sum = NULL_RTX, mask = NULL_RTX;
987 if (bit == BITS_PER_WORD)
989 /* For signed modulo we need to add correction to the sum
990 and that might again overflow. */
991 if (!unsignedp)
992 continue;
993 if (optab_handler (uaddv4_optab, word_mode) == CODE_FOR_nothing)
994 continue;
995 tree wtype = lang_hooks.types.type_for_mode (word_mode, 1);
996 if (wtype == NULL_TREE)
997 continue;
998 tree ctype = build_complex_type (wtype);
999 if (TYPE_MODE (ctype) != GET_MODE_COMPLEX_MODE (word_mode))
1000 continue;
1001 machine_mode cmode = TYPE_MODE (ctype);
1002 rtx op00 = operand_subword_force (op0, 0, mode);
1003 rtx op01 = operand_subword_force (op0, 1, mode);
1004 rtx cres = gen_rtx_CONCAT (cmode, gen_reg_rtx (word_mode),
1005 gen_reg_rtx (word_mode));
1006 tree lhs = make_tree (ctype, cres);
1007 tree arg0 = make_tree (wtype, op00);
1008 tree arg1 = make_tree (wtype, op01);
1009 expand_addsub_overflow (UNKNOWN_LOCATION, PLUS_EXPR, lhs, arg0,
1010 arg1, true, true, true, false, NULL);
1011 sum = expand_simple_binop (word_mode, PLUS, XEXP (cres, 0),
1012 XEXP (cres, 1), NULL_RTX, 1,
1013 OPTAB_DIRECT);
1014 if (sum == NULL_RTX)
1015 return NULL_RTX;
1017 else
1019 /* Code below uses GEN_INT, so we need the masks to be representable
1020 in HOST_WIDE_INTs. */
1021 if (bit >= HOST_BITS_PER_WIDE_INT)
1022 continue;
1023 /* If op0 is e.g. -1 or -2 unsigned, then the 2 additions might
1024 overflow. Consider 64-bit -1ULL for word size 32, if we add
1025 0x7fffffffU + 0x7fffffffU + 3U, it wraps around to 1. */
1026 if (bit == BITS_PER_WORD - 1)
1027 continue;
1029 int count = (2 * BITS_PER_WORD + bit - 1) / bit;
1030 rtx sum_corr = NULL_RTX;
1032 if (!unsignedp)
1034 /* For signed modulo, compute it as unsigned modulo of
1035 sum with a correction added to it if OP0 is negative,
1036 such that the result can be computed as unsigned
1037 remainder + ((OP1 >> (2 * BITS_PER_WORD - 1)) & (1 - OP1). */
1038 w = wi::min_value (2 * BITS_PER_WORD, SIGNED);
1039 wide_int wmod1 = wi::umod_trunc (w, INTVAL (op1));
1040 wide_int wmod2 = wi::smod_trunc (w, INTVAL (op1));
1041 /* wmod2 == -wmod1. */
1042 wmod2 = wmod2 + (INTVAL (op1) - 1);
1043 if (wi::ne_p (wmod1, wmod2))
1045 wide_int wcorr = wmod2 - wmod1;
1046 if (wi::neg_p (w))
1047 wcorr = wcorr + INTVAL (op1);
1048 /* Now verify if the count sums can't overflow, and punt
1049 if they could. */
1050 w = wi::mask (bit, false, 2 * BITS_PER_WORD);
1051 w = w * (count - 1);
1052 w = w + wi::mask (2 * BITS_PER_WORD - (count - 1) * bit,
1053 false, 2 * BITS_PER_WORD);
1054 w = w + wcorr;
1055 w = wi::lrshift (w, BITS_PER_WORD);
1056 if (wi::ne_p (w, 0))
1057 continue;
1059 mask = operand_subword_force (op0, WORDS_BIG_ENDIAN ? 0 : 1,
1060 mode);
1061 mask = expand_simple_binop (word_mode, ASHIFTRT, mask,
1062 GEN_INT (BITS_PER_WORD - 1),
1063 NULL_RTX, 0, OPTAB_DIRECT);
1064 if (mask == NULL_RTX)
1065 return NULL_RTX;
1066 sum_corr = immed_wide_int_const (wcorr, word_mode);
1067 sum_corr = expand_simple_binop (word_mode, AND, mask,
1068 sum_corr, NULL_RTX, 1,
1069 OPTAB_DIRECT);
1070 if (sum_corr == NULL_RTX)
1071 return NULL_RTX;
1075 for (int i = 0; i < count; i++)
1077 rtx v = op0;
1078 if (i)
1079 v = expand_simple_binop (mode, LSHIFTRT, v, GEN_INT (i * bit),
1080 NULL_RTX, 1, OPTAB_DIRECT);
1081 if (v == NULL_RTX)
1082 return NULL_RTX;
1083 v = lowpart_subreg (word_mode, v, mode);
1084 if (v == NULL_RTX)
1085 return NULL_RTX;
1086 if (i != count - 1)
1087 v = expand_simple_binop (word_mode, AND, v,
1088 GEN_INT ((HOST_WIDE_INT_1U << bit)
1089 - 1), NULL_RTX, 1,
1090 OPTAB_DIRECT);
1091 if (v == NULL_RTX)
1092 return NULL_RTX;
1093 if (sum == NULL_RTX)
1094 sum = v;
1095 else
1096 sum = expand_simple_binop (word_mode, PLUS, sum, v, NULL_RTX,
1097 1, OPTAB_DIRECT);
1098 if (sum == NULL_RTX)
1099 return NULL_RTX;
1101 if (sum_corr)
1103 sum = expand_simple_binop (word_mode, PLUS, sum, sum_corr,
1104 NULL_RTX, 1, OPTAB_DIRECT);
1105 if (sum == NULL_RTX)
1106 return NULL_RTX;
1109 rtx remainder = expand_divmod (1, TRUNC_MOD_EXPR, word_mode, sum,
1110 gen_int_mode (INTVAL (op1), word_mode),
1111 NULL_RTX, 1, OPTAB_DIRECT);
1112 if (remainder == NULL_RTX)
1113 return NULL_RTX;
1115 if (!unsignedp)
1117 if (mask == NULL_RTX)
1119 mask = operand_subword_force (op0, WORDS_BIG_ENDIAN ? 0 : 1,
1120 mode);
1121 mask = expand_simple_binop (word_mode, ASHIFTRT, mask,
1122 GEN_INT (BITS_PER_WORD - 1),
1123 NULL_RTX, 0, OPTAB_DIRECT);
1124 if (mask == NULL_RTX)
1125 return NULL_RTX;
1127 mask = expand_simple_binop (word_mode, AND, mask,
1128 gen_int_mode (1 - INTVAL (op1),
1129 word_mode),
1130 NULL_RTX, 1, OPTAB_DIRECT);
1131 if (mask == NULL_RTX)
1132 return NULL_RTX;
1133 remainder = expand_simple_binop (word_mode, PLUS, remainder,
1134 mask, NULL_RTX, 1, OPTAB_DIRECT);
1135 if (remainder == NULL_RTX)
1136 return NULL_RTX;
1139 remainder = convert_modes (mode, word_mode, remainder, unsignedp);
1140 /* Punt if we need any library calls. */
1141 if (last)
1142 last = NEXT_INSN (last);
1143 else
1144 last = get_insns ();
1145 for (; last; last = NEXT_INSN (last))
1146 if (CALL_P (last))
1147 return NULL_RTX;
1148 return remainder;
1150 return NULL_RTX;
1153 /* Similarly to the above function, but compute both quotient and remainder.
1154 Quotient can be computed from the remainder as:
1155 rem = op0 % op1; // Handled using expand_doubleword_mod
1156 quot = (op0 - rem) * inv; // inv is multiplicative inverse of op1 modulo
1157 // 2 * BITS_PER_WORD
1159 We can also handle cases where op1 is a multiple of power of two constant
1160 and constant handled by expand_doubleword_mod.
1161 op11 = 1 << __builtin_ctz (op1);
1162 op12 = op1 / op11;
1163 rem1 = op0 % op12; // Handled using expand_doubleword_mod
1164 quot1 = (op0 - rem1) * inv; // inv is multiplicative inverse of op12 modulo
1165 // 2 * BITS_PER_WORD
1166 rem = (quot1 % op11) * op12 + rem1;
1167 quot = quot1 / op11; */
1170 expand_doubleword_divmod (machine_mode mode, rtx op0, rtx op1, rtx *rem,
1171 bool unsignedp)
1173 *rem = NULL_RTX;
1175 /* Negative dividend should have been optimized into positive,
1176 similarly modulo by 1 and modulo by power of two is optimized
1177 differently too. */
1178 if (INTVAL (op1) <= 1 || pow2p_hwi (INTVAL (op1)))
1179 return NULL_RTX;
1181 rtx op11 = const1_rtx;
1182 rtx op12 = op1;
1183 if ((INTVAL (op1) & 1) == 0)
1185 int bit = ctz_hwi (INTVAL (op1));
1186 op11 = GEN_INT (HOST_WIDE_INT_1 << bit);
1187 op12 = GEN_INT (INTVAL (op1) >> bit);
1190 rtx rem1 = expand_doubleword_mod (mode, op0, op12, unsignedp);
1191 if (rem1 == NULL_RTX)
1192 return NULL_RTX;
1194 int prec = 2 * BITS_PER_WORD;
1195 wide_int a = wide_int::from (INTVAL (op12), prec + 1, UNSIGNED);
1196 wide_int b = wi::shifted_mask (prec, 1, false, prec + 1);
1197 wide_int m = wide_int::from (wi::mod_inv (a, b), prec, UNSIGNED);
1198 rtx inv = immed_wide_int_const (m, mode);
1200 rtx_insn *last = get_last_insn ();
1201 rtx quot1 = expand_simple_binop (mode, MINUS, op0, rem1,
1202 NULL_RTX, unsignedp, OPTAB_DIRECT);
1203 if (quot1 == NULL_RTX)
1204 return NULL_RTX;
1206 quot1 = expand_simple_binop (mode, MULT, quot1, inv,
1207 NULL_RTX, unsignedp, OPTAB_DIRECT);
1208 if (quot1 == NULL_RTX)
1209 return NULL_RTX;
1211 if (op11 != const1_rtx)
1213 rtx rem2 = expand_divmod (1, TRUNC_MOD_EXPR, mode, quot1, op11,
1214 NULL_RTX, unsignedp, OPTAB_DIRECT);
1215 if (rem2 == NULL_RTX)
1216 return NULL_RTX;
1218 rem2 = expand_simple_binop (mode, MULT, rem2, op12, NULL_RTX,
1219 unsignedp, OPTAB_DIRECT);
1220 if (rem2 == NULL_RTX)
1221 return NULL_RTX;
1223 rem2 = expand_simple_binop (mode, PLUS, rem2, rem1, NULL_RTX,
1224 unsignedp, OPTAB_DIRECT);
1225 if (rem2 == NULL_RTX)
1226 return NULL_RTX;
1228 rtx quot2 = expand_divmod (0, TRUNC_DIV_EXPR, mode, quot1, op11,
1229 NULL_RTX, unsignedp, OPTAB_DIRECT);
1230 if (quot2 == NULL_RTX)
1231 return NULL_RTX;
1233 rem1 = rem2;
1234 quot1 = quot2;
1237 /* Punt if we need any library calls. */
1238 if (last)
1239 last = NEXT_INSN (last);
1240 else
1241 last = get_insns ();
1242 for (; last; last = NEXT_INSN (last))
1243 if (CALL_P (last))
1244 return NULL_RTX;
1246 *rem = rem1;
1247 return quot1;
1250 /* Wrapper around expand_binop which takes an rtx code to specify
1251 the operation to perform, not an optab pointer. All other
1252 arguments are the same. */
1254 expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
1255 rtx op1, rtx target, int unsignedp,
1256 enum optab_methods methods)
1258 optab binop = code_to_optab (code);
1259 gcc_assert (binop);
1261 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1264 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1265 binop. Order them according to commutative_operand_precedence and, if
1266 possible, try to put TARGET or a pseudo first. */
1267 static bool
1268 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1270 int op0_prec = commutative_operand_precedence (op0);
1271 int op1_prec = commutative_operand_precedence (op1);
1273 if (op0_prec < op1_prec)
1274 return true;
1276 if (op0_prec > op1_prec)
1277 return false;
1279 /* With equal precedence, both orders are ok, but it is better if the
1280 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1281 if (target == 0 || REG_P (target))
1282 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1283 else
1284 return rtx_equal_p (op1, target);
1287 /* Return true if BINOPTAB implements a shift operation. */
1289 static bool
1290 shift_optab_p (optab binoptab)
1292 switch (optab_to_code (binoptab))
1294 case ASHIFT:
1295 case SS_ASHIFT:
1296 case US_ASHIFT:
1297 case ASHIFTRT:
1298 case LSHIFTRT:
1299 case ROTATE:
1300 case ROTATERT:
1301 return true;
1303 default:
1304 return false;
1308 /* Return true if BINOPTAB implements a commutative binary operation. */
1310 static bool
1311 commutative_optab_p (optab binoptab)
1313 return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
1314 || binoptab == smul_widen_optab
1315 || binoptab == umul_widen_optab
1316 || binoptab == smul_highpart_optab
1317 || binoptab == umul_highpart_optab);
1320 /* X is to be used in mode MODE as operand OPN to BINOPTAB. If we're
1321 optimizing, and if the operand is a constant that costs more than
1322 1 instruction, force the constant into a register and return that
1323 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
1325 static rtx
1326 avoid_expensive_constant (machine_mode mode, optab binoptab,
1327 int opn, rtx x, bool unsignedp)
1329 bool speed = optimize_insn_for_speed_p ();
1331 if (mode != VOIDmode
1332 && optimize
1333 && CONSTANT_P (x)
1334 && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
1335 > set_src_cost (x, mode, speed)))
1337 if (CONST_INT_P (x))
1339 HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
1340 if (intval != INTVAL (x))
1341 x = GEN_INT (intval);
1343 else
1344 x = convert_modes (mode, VOIDmode, x, unsignedp);
1345 x = force_reg (mode, x);
1347 return x;
1350 /* Helper function for expand_binop: handle the case where there
1351 is an insn ICODE that directly implements the indicated operation.
1352 Returns null if this is not possible. */
1353 static rtx
1354 expand_binop_directly (enum insn_code icode, machine_mode mode, optab binoptab,
1355 rtx op0, rtx op1,
1356 rtx target, int unsignedp, enum optab_methods methods,
1357 rtx_insn *last)
1359 machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
1360 machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
1361 machine_mode mode0, mode1, tmp_mode;
1362 class expand_operand ops[3];
1363 bool commutative_p;
1364 rtx_insn *pat;
1365 rtx xop0 = op0, xop1 = op1;
1366 bool canonicalize_op1 = false;
1368 /* If it is a commutative operator and the modes would match
1369 if we would swap the operands, we can save the conversions. */
1370 commutative_p = commutative_optab_p (binoptab);
1371 if (commutative_p
1372 && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
1373 && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
1374 std::swap (xop0, xop1);
1376 /* If we are optimizing, force expensive constants into a register. */
1377 xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
1378 if (!shift_optab_p (binoptab))
1379 xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
1380 else
1381 /* Shifts and rotates often use a different mode for op1 from op0;
1382 for VOIDmode constants we don't know the mode, so force it
1383 to be canonicalized using convert_modes. */
1384 canonicalize_op1 = true;
1386 /* In case the insn wants input operands in modes different from
1387 those of the actual operands, convert the operands. It would
1388 seem that we don't need to convert CONST_INTs, but we do, so
1389 that they're properly zero-extended, sign-extended or truncated
1390 for their mode. */
1392 mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
1393 if (xmode0 != VOIDmode && xmode0 != mode0)
1395 xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
1396 mode0 = xmode0;
1399 mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
1400 ? GET_MODE (xop1) : mode);
1401 if (xmode1 != VOIDmode && xmode1 != mode1)
1403 xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
1404 mode1 = xmode1;
1407 /* If operation is commutative,
1408 try to make the first operand a register.
1409 Even better, try to make it the same as the target.
1410 Also try to make the last operand a constant. */
1411 if (commutative_p
1412 && swap_commutative_operands_with_target (target, xop0, xop1))
1413 std::swap (xop0, xop1);
1415 /* Now, if insn's predicates don't allow our operands, put them into
1416 pseudo regs. */
1418 if (binoptab == vec_pack_trunc_optab
1419 || binoptab == vec_pack_usat_optab
1420 || binoptab == vec_pack_ssat_optab
1421 || binoptab == vec_pack_ufix_trunc_optab
1422 || binoptab == vec_pack_sfix_trunc_optab
1423 || binoptab == vec_packu_float_optab
1424 || binoptab == vec_packs_float_optab)
1426 /* The mode of the result is different then the mode of the
1427 arguments. */
1428 tmp_mode = insn_data[(int) icode].operand[0].mode;
1429 if (VECTOR_MODE_P (mode)
1430 && maybe_ne (GET_MODE_NUNITS (tmp_mode), 2 * GET_MODE_NUNITS (mode)))
1432 delete_insns_since (last);
1433 return NULL_RTX;
1436 else
1437 tmp_mode = mode;
1439 create_output_operand (&ops[0], target, tmp_mode);
1440 create_input_operand (&ops[1], xop0, mode0);
1441 create_input_operand (&ops[2], xop1, mode1);
1442 pat = maybe_gen_insn (icode, 3, ops);
1443 if (pat)
1445 /* If PAT is composed of more than one insn, try to add an appropriate
1446 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1447 operand, call expand_binop again, this time without a target. */
1448 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1449 && ! add_equal_note (pat, ops[0].value,
1450 optab_to_code (binoptab),
1451 ops[1].value, ops[2].value, mode0))
1453 delete_insns_since (last);
1454 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1455 unsignedp, methods);
1458 emit_insn (pat);
1459 return ops[0].value;
1461 delete_insns_since (last);
1462 return NULL_RTX;
1465 /* Generate code to perform an operation specified by BINOPTAB
1466 on operands OP0 and OP1, with result having machine-mode MODE.
1468 UNSIGNEDP is for the case where we have to widen the operands
1469 to perform the operation. It says to use zero-extension.
1471 If TARGET is nonzero, the value
1472 is generated there, if it is convenient to do so.
1473 In all cases an rtx is returned for the locus of the value;
1474 this may or may not be TARGET. */
1477 expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
1478 rtx target, int unsignedp, enum optab_methods methods)
1480 enum optab_methods next_methods
1481 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1482 ? OPTAB_WIDEN : methods);
1483 enum mode_class mclass;
1484 enum insn_code icode;
1485 machine_mode wider_mode;
1486 scalar_int_mode int_mode;
1487 rtx libfunc;
1488 rtx temp;
1489 rtx_insn *entry_last = get_last_insn ();
1490 rtx_insn *last;
1492 mclass = GET_MODE_CLASS (mode);
1494 /* If subtracting an integer constant, convert this into an addition of
1495 the negated constant. */
1497 if (binoptab == sub_optab && CONST_INT_P (op1))
1499 op1 = negate_rtx (mode, op1);
1500 binoptab = add_optab;
1502 /* For shifts, constant invalid op1 might be expanded from different
1503 mode than MODE. As those are invalid, force them to a register
1504 to avoid further problems during expansion. */
1505 else if (CONST_INT_P (op1)
1506 && shift_optab_p (binoptab)
1507 && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
1509 op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
1510 op1 = force_reg (GET_MODE_INNER (mode), op1);
1513 /* Record where to delete back to if we backtrack. */
1514 last = get_last_insn ();
1516 /* If we can do it with a three-operand insn, do so. */
1518 if (methods != OPTAB_MUST_WIDEN)
1520 if (convert_optab_p (binoptab))
1522 machine_mode from_mode = widened_mode (mode, op0, op1);
1523 icode = find_widening_optab_handler (binoptab, mode, from_mode);
1525 else
1526 icode = optab_handler (binoptab, mode);
1527 if (icode != CODE_FOR_nothing)
1529 temp = expand_binop_directly (icode, mode, binoptab, op0, op1,
1530 target, unsignedp, methods, last);
1531 if (temp)
1532 return temp;
1536 /* If we were trying to rotate, and that didn't work, try rotating
1537 the other direction before falling back to shifts and bitwise-or. */
1538 if (((binoptab == rotl_optab
1539 && (icode = optab_handler (rotr_optab, mode)) != CODE_FOR_nothing)
1540 || (binoptab == rotr_optab
1541 && (icode = optab_handler (rotl_optab, mode)) != CODE_FOR_nothing))
1542 && is_int_mode (mode, &int_mode))
1544 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1545 rtx newop1;
1546 unsigned int bits = GET_MODE_PRECISION (int_mode);
1548 if (CONST_INT_P (op1))
1549 newop1 = gen_int_shift_amount (int_mode, bits - INTVAL (op1));
1550 else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
1551 newop1 = negate_rtx (GET_MODE (op1), op1);
1552 else
1553 newop1 = expand_binop (GET_MODE (op1), sub_optab,
1554 gen_int_mode (bits, GET_MODE (op1)), op1,
1555 NULL_RTX, unsignedp, OPTAB_DIRECT);
1557 temp = expand_binop_directly (icode, int_mode, otheroptab, op0, newop1,
1558 target, unsignedp, methods, last);
1559 if (temp)
1560 return temp;
1563 /* If this is a multiply, see if we can do a widening operation that
1564 takes operands of this mode and makes a wider mode. */
1566 if (binoptab == smul_optab
1567 && GET_MODE_2XWIDER_MODE (mode).exists (&wider_mode)
1568 && (convert_optab_handler ((unsignedp
1569 ? umul_widen_optab
1570 : smul_widen_optab),
1571 wider_mode, mode) != CODE_FOR_nothing))
1573 /* *_widen_optab needs to determine operand mode, make sure at least
1574 one operand has non-VOID mode. */
1575 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
1576 op0 = force_reg (mode, op0);
1577 temp = expand_binop (wider_mode,
1578 unsignedp ? umul_widen_optab : smul_widen_optab,
1579 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1581 if (temp != 0)
1583 if (GET_MODE_CLASS (mode) == MODE_INT
1584 && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
1585 return gen_lowpart (mode, temp);
1586 else
1587 return convert_to_mode (mode, temp, unsignedp);
1591 /* If this is a vector shift by a scalar, see if we can do a vector
1592 shift by a vector. If so, broadcast the scalar into a vector. */
1593 if (mclass == MODE_VECTOR_INT)
1595 optab otheroptab = unknown_optab;
1597 if (binoptab == ashl_optab)
1598 otheroptab = vashl_optab;
1599 else if (binoptab == ashr_optab)
1600 otheroptab = vashr_optab;
1601 else if (binoptab == lshr_optab)
1602 otheroptab = vlshr_optab;
1603 else if (binoptab == rotl_optab)
1604 otheroptab = vrotl_optab;
1605 else if (binoptab == rotr_optab)
1606 otheroptab = vrotr_optab;
1608 if (otheroptab
1609 && (icode = optab_handler (otheroptab, mode)) != CODE_FOR_nothing)
1611 /* The scalar may have been extended to be too wide. Truncate
1612 it back to the proper size to fit in the broadcast vector. */
1613 scalar_mode inner_mode = GET_MODE_INNER (mode);
1614 if (!CONST_INT_P (op1)
1615 && (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (op1)))
1616 > GET_MODE_BITSIZE (inner_mode)))
1617 op1 = force_reg (inner_mode,
1618 simplify_gen_unary (TRUNCATE, inner_mode, op1,
1619 GET_MODE (op1)));
1620 rtx vop1 = expand_vector_broadcast (mode, op1);
1621 if (vop1)
1623 temp = expand_binop_directly (icode, mode, otheroptab, op0, vop1,
1624 target, unsignedp, methods, last);
1625 if (temp)
1626 return temp;
1631 /* Look for a wider mode of the same class for which we think we
1632 can open-code the operation. Check for a widening multiply at the
1633 wider mode as well. */
1635 if (CLASS_HAS_WIDER_MODES_P (mclass)
1636 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1637 FOR_EACH_WIDER_MODE (wider_mode, mode)
1639 machine_mode next_mode;
1640 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
1641 || (binoptab == smul_optab
1642 && GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
1643 && (find_widening_optab_handler ((unsignedp
1644 ? umul_widen_optab
1645 : smul_widen_optab),
1646 next_mode, mode)
1647 != CODE_FOR_nothing)))
1649 rtx xop0 = op0, xop1 = op1;
1650 int no_extend = 0;
1652 /* For certain integer operations, we need not actually extend
1653 the narrow operands, as long as we will truncate
1654 the results to the same narrowness. */
1656 if ((binoptab == ior_optab || binoptab == and_optab
1657 || binoptab == xor_optab
1658 || binoptab == add_optab || binoptab == sub_optab
1659 || binoptab == smul_optab || binoptab == ashl_optab)
1660 && mclass == MODE_INT)
1662 no_extend = 1;
1663 xop0 = avoid_expensive_constant (mode, binoptab, 0,
1664 xop0, unsignedp);
1665 if (binoptab != ashl_optab)
1666 xop1 = avoid_expensive_constant (mode, binoptab, 1,
1667 xop1, unsignedp);
1670 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1672 /* The second operand of a shift must always be extended. */
1673 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1674 no_extend && binoptab != ashl_optab);
1676 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1677 unsignedp, OPTAB_DIRECT);
1678 if (temp)
1680 if (mclass != MODE_INT
1681 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1683 if (target == 0)
1684 target = gen_reg_rtx (mode);
1685 convert_move (target, temp, 0);
1686 return target;
1688 else
1689 return gen_lowpart (mode, temp);
1691 else
1692 delete_insns_since (last);
1696 /* If operation is commutative,
1697 try to make the first operand a register.
1698 Even better, try to make it the same as the target.
1699 Also try to make the last operand a constant. */
1700 if (commutative_optab_p (binoptab)
1701 && swap_commutative_operands_with_target (target, op0, op1))
1702 std::swap (op0, op1);
1704 /* These can be done a word at a time. */
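/* For example, with 32-bit words a DImode AND decomposes into two
   independent SImode ANDs, one per subword; AND, IOR and XOR have no
   carries or other cross-word dependencies.  */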
1705 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1706 && is_int_mode (mode, &int_mode)
1707 && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
1708 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1710 int i;
1711 rtx_insn *insns;
1713 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1714 won't be accurate, so use a new target. */
1715 if (target == 0
1716 || target == op0
1717 || target == op1
1718 || reg_overlap_mentioned_p (target, op0)
1719 || reg_overlap_mentioned_p (target, op1)
1720 || !valid_multiword_target_p (target))
1721 target = gen_reg_rtx (int_mode);
1723 start_sequence ();
1725 /* Do the actual arithmetic. */
1726 machine_mode op0_mode = GET_MODE (op0);
1727 machine_mode op1_mode = GET_MODE (op1);
1728 if (op0_mode == VOIDmode)
1729 op0_mode = int_mode;
1730 if (op1_mode == VOIDmode)
1731 op1_mode = int_mode;
1732 for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
1734 rtx target_piece = operand_subword (target, i, 1, int_mode);
1735 rtx x = expand_binop (word_mode, binoptab,
1736 operand_subword_force (op0, i, op0_mode),
1737 operand_subword_force (op1, i, op1_mode),
1738 target_piece, unsignedp, next_methods);
1740 if (x == 0)
1741 break;
1743 if (target_piece != x)
1744 emit_move_insn (target_piece, x);
1747 insns = get_insns ();
1748 end_sequence ();
1750 if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD)
1752 emit_insn (insns);
1753 return target;
1757 /* Synthesize double word shifts from single word shifts. */
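/* As an illustration, a left shift of the double word HI:LO by a count
   N with 0 < N < BITS_PER_WORD is roughly
     HI = (HI << N) | (LO >> (BITS_PER_WORD - N));
     LO = LO << N;
   expand_doubleword_shift handles the variable-count and endianness
   details below.  */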
1758 if ((binoptab == lshr_optab || binoptab == ashl_optab
1759 || binoptab == ashr_optab)
1760 && is_int_mode (mode, &int_mode)
1761 && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
1762 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
1763 && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode)
1764 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
1765 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1766 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1768 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1769 scalar_int_mode op1_mode;
1771 double_shift_mask = targetm.shift_truncation_mask (int_mode);
1772 shift_mask = targetm.shift_truncation_mask (word_mode);
1773 op1_mode = (GET_MODE (op1) != VOIDmode
1774 ? as_a <scalar_int_mode> (GET_MODE (op1))
1775 : word_mode);
1777 /* Apply the truncation to constant shifts. */
1778 if (double_shift_mask > 0 && CONST_INT_P (op1))
1779 op1 = gen_int_mode (INTVAL (op1) & double_shift_mask, op1_mode);
1781 if (op1 == CONST0_RTX (op1_mode))
1782 return op0;
1784 /* Make sure that this is a combination that expand_doubleword_shift
1785 can handle. See the comments there for details. */
1786 if (double_shift_mask == 0
1787 || (shift_mask == BITS_PER_WORD - 1
1788 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1790 rtx_insn *insns;
1791 rtx into_target, outof_target;
1792 rtx into_input, outof_input;
1793 int left_shift, outof_word;
1795 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1796 won't be accurate, so use a new target. */
1797 if (target == 0
1798 || target == op0
1799 || target == op1
1800 || reg_overlap_mentioned_p (target, op0)
1801 || reg_overlap_mentioned_p (target, op1)
1802 || !valid_multiword_target_p (target))
1803 target = gen_reg_rtx (int_mode);
1805 start_sequence ();
1807 /* OUTOF_* is the word we are shifting bits away from, and
1808 INTO_* is the word that we are shifting bits towards; thus
1809 they differ depending on the direction of the shift and
1810 WORDS_BIG_ENDIAN. */
1812 left_shift = binoptab == ashl_optab;
1813 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1815 outof_target = operand_subword (target, outof_word, 1, int_mode);
1816 into_target = operand_subword (target, 1 - outof_word, 1, int_mode);
1818 outof_input = operand_subword_force (op0, outof_word, int_mode);
1819 into_input = operand_subword_force (op0, 1 - outof_word, int_mode);
1821 if (expand_doubleword_shift (op1_mode, binoptab,
1822 outof_input, into_input, op1,
1823 outof_target, into_target,
1824 unsignedp, next_methods, shift_mask))
1826 insns = get_insns ();
1827 end_sequence ();
1829 emit_insn (insns);
1830 return target;
1832 end_sequence ();
1836 /* Synthesize double word rotates from single word shifts. */
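/* For instance, rotating the double word HI:LO left by a constant
   N with 0 < N < BITS_PER_WORD can be written with word-sized shifts as
     new_HI = (HI << N) | (LO >> (BITS_PER_WORD - N));
     new_LO = (LO << N) | (HI >> (BITS_PER_WORD - N));
   a rotate by exactly BITS_PER_WORD is just a word swap, which is the
   special case handled below.  */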
1837 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1838 && is_int_mode (mode, &int_mode)
1839 && CONST_INT_P (op1)
1840 && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD
1841 && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1842 && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1844 rtx_insn *insns;
1845 rtx into_target, outof_target;
1846 rtx into_input, outof_input;
1847 rtx inter;
1848 int shift_count, left_shift, outof_word;
1850 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1851 won't be accurate, so use a new target. Do this also if target is not
1852 a REG, first because having a register instead may open optimization
1853 opportunities, and second because if target and op0 happen to be MEMs
1854 designating the same location, we would risk clobbering it too early
1855 in the code sequence we generate below. */
1856 if (target == 0
1857 || target == op0
1858 || target == op1
1859 || !REG_P (target)
1860 || reg_overlap_mentioned_p (target, op0)
1861 || reg_overlap_mentioned_p (target, op1)
1862 || !valid_multiword_target_p (target))
1863 target = gen_reg_rtx (int_mode);
1865 start_sequence ();
1867 shift_count = INTVAL (op1);
1869 /* OUTOF_* is the word we are shifting bits away from, and
1870 INTO_* is the word that we are shifting bits towards; thus
1871 they differ depending on the direction of the shift and
1872 WORDS_BIG_ENDIAN. */
1874 left_shift = (binoptab == rotl_optab);
1875 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1877 outof_target = operand_subword (target, outof_word, 1, int_mode);
1878 into_target = operand_subword (target, 1 - outof_word, 1, int_mode);
1880 outof_input = operand_subword_force (op0, outof_word, int_mode);
1881 into_input = operand_subword_force (op0, 1 - outof_word, int_mode);
1883 if (shift_count == BITS_PER_WORD)
1885 /* This is just a word swap. */
1886 emit_move_insn (outof_target, into_input);
1887 emit_move_insn (into_target, outof_input);
1888 inter = const0_rtx;
1890 else
1892 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1893 HOST_WIDE_INT first_shift_count, second_shift_count;
1894 optab reverse_unsigned_shift, unsigned_shift;
1896 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1897 ? lshr_optab : ashl_optab);
1899 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1900 ? ashl_optab : lshr_optab);
1902 if (shift_count > BITS_PER_WORD)
1904 first_shift_count = shift_count - BITS_PER_WORD;
1905 second_shift_count = 2 * BITS_PER_WORD - shift_count;
1907 else
1909 first_shift_count = BITS_PER_WORD - shift_count;
1910 second_shift_count = shift_count;
1912 rtx first_shift_count_rtx
1913 = gen_int_shift_amount (word_mode, first_shift_count);
1914 rtx second_shift_count_rtx
1915 = gen_int_shift_amount (word_mode, second_shift_count);
1917 into_temp1 = expand_binop (word_mode, unsigned_shift,
1918 outof_input, first_shift_count_rtx,
1919 NULL_RTX, unsignedp, next_methods);
1920 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1921 into_input, second_shift_count_rtx,
1922 NULL_RTX, unsignedp, next_methods);
1924 if (into_temp1 != 0 && into_temp2 != 0)
1925 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1926 into_target, unsignedp, next_methods);
1927 else
1928 inter = 0;
1930 if (inter != 0 && inter != into_target)
1931 emit_move_insn (into_target, inter);
1933 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1934 into_input, first_shift_count_rtx,
1935 NULL_RTX, unsignedp, next_methods);
1936 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1937 outof_input, second_shift_count_rtx,
1938 NULL_RTX, unsignedp, next_methods);
1940 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1941 inter = expand_binop (word_mode, ior_optab,
1942 outof_temp1, outof_temp2,
1943 outof_target, unsignedp, next_methods);
1945 if (inter != 0 && inter != outof_target)
1946 emit_move_insn (outof_target, inter);
1949 insns = get_insns ();
1950 end_sequence ();
1952 if (inter != 0)
1954 emit_insn (insns);
1955 return target;
1959 /* These can be done a word at a time by propagating carries. */
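/* Sketch of the idea for a two-word unsigned addition: add the low
   words, detect a carry by checking whether the low-word sum wrapped
   around (sum < addend, an unsigned comparison), and add that carry
   into the sum of the high words.  The loop below generalizes this to
   any number of words and to subtraction.  */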
1960 if ((binoptab == add_optab || binoptab == sub_optab)
1961 && is_int_mode (mode, &int_mode)
1962 && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD
1963 && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1965 unsigned int i;
1966 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1967 const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD;
1968 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1969 rtx xop0, xop1, xtarget;
1971 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG_VALUE
1972 is one of those, use it. Otherwise, use 1 since it is the
1973 one easiest to get. */
1974 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1975 int normalizep = STORE_FLAG_VALUE;
1976 #else
1977 int normalizep = 1;
1978 #endif
1980 /* Prepare the operands. */
1981 xop0 = force_reg (int_mode, op0);
1982 xop1 = force_reg (int_mode, op1);
1984 xtarget = gen_reg_rtx (int_mode);
1986 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1987 target = xtarget;
1989 /* Indicate for flow that the entire target reg is being set. */
1990 if (REG_P (target))
1991 emit_clobber (xtarget);
1993 /* Do the actual arithmetic. */
1994 for (i = 0; i < nwords; i++)
1996 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1997 rtx target_piece = operand_subword (xtarget, index, 1, int_mode);
1998 rtx op0_piece = operand_subword_force (xop0, index, int_mode);
1999 rtx op1_piece = operand_subword_force (xop1, index, int_mode);
2000 rtx x;
2002 /* Main add/subtract of the input operands. */
2003 x = expand_binop (word_mode, binoptab,
2004 op0_piece, op1_piece,
2005 target_piece, unsignedp, next_methods);
2006 if (x == 0)
2007 break;
2009 if (i + 1 < nwords)
2011 /* Store carry from main add/subtract. */
2012 carry_out = gen_reg_rtx (word_mode);
2013 carry_out = emit_store_flag_force (carry_out,
2014 (binoptab == add_optab
2015 ? LT : GT),
2016 x, op0_piece,
2017 word_mode, 1, normalizep);
2020 if (i > 0)
2022 rtx newx;
2024 /* Add/subtract previous carry to main result. */
2025 newx = expand_binop (word_mode,
2026 normalizep == 1 ? binoptab : otheroptab,
2027 x, carry_in,
2028 NULL_RTX, 1, next_methods);
2030 if (i + 1 < nwords)
2032 /* Get out carry from adding/subtracting carry in. */
2033 rtx carry_tmp = gen_reg_rtx (word_mode);
2034 carry_tmp = emit_store_flag_force (carry_tmp,
2035 (binoptab == add_optab
2036 ? LT : GT),
2037 newx, x,
2038 word_mode, 1, normalizep);
2040 /* Logical-ior the two possible carries together. */
2041 carry_out = expand_binop (word_mode, ior_optab,
2042 carry_out, carry_tmp,
2043 carry_out, 0, next_methods);
2044 if (carry_out == 0)
2045 break;
2047 emit_move_insn (target_piece, newx);
2049 else
2051 if (x != target_piece)
2052 emit_move_insn (target_piece, x);
2055 carry_in = carry_out;
2058 if (i == GET_MODE_BITSIZE (int_mode) / (unsigned) BITS_PER_WORD)
2060 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing
2061 || ! rtx_equal_p (target, xtarget))
2063 rtx_insn *temp = emit_move_insn (target, xtarget);
2065 set_dst_reg_note (temp, REG_EQUAL,
2066 gen_rtx_fmt_ee (optab_to_code (binoptab),
2067 int_mode, copy_rtx (xop0),
2068 copy_rtx (xop1)),
2069 target);
2071 else
2072 target = xtarget;
2074 return target;
2077 else
2078 delete_insns_since (last);
2081 /* Attempt to synthesize double word multiplies using a sequence of word
2082 mode multiplications. We first attempt to generate a sequence using a
2083 more efficient unsigned widening multiply, and if that fails we then
2084 try using a signed widening multiply. */
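/* In outline, with W = BITS_PER_WORD, the low 2*W bits of the product
   of HI0:LO0 and HI1:LO1 are
     widen (LO0 * LO1) + ((LO0 * HI1 + HI0 * LO1) << W),
   so one widening word multiply plus two ordinary word multiplies and
   word additions suffice; expand_doubleword_mult emits that sequence.  */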
2086 if (binoptab == smul_optab
2087 && is_int_mode (mode, &int_mode)
2088 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2089 && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
2090 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
2092 rtx product = NULL_RTX;
2093 if (convert_optab_handler (umul_widen_optab, int_mode, word_mode)
2094 != CODE_FOR_nothing)
2096 product = expand_doubleword_mult (int_mode, op0, op1, target,
2097 true, methods);
2098 if (!product)
2099 delete_insns_since (last);
2102 if (product == NULL_RTX
2103 && (convert_optab_handler (smul_widen_optab, int_mode, word_mode)
2104 != CODE_FOR_nothing))
2106 product = expand_doubleword_mult (int_mode, op0, op1, target,
2107 false, methods);
2108 if (!product)
2109 delete_insns_since (last);
2112 if (product != NULL_RTX)
2114 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
2116 rtx_insn *move = emit_move_insn (target ? target : product,
2117 product);
2118 set_dst_reg_note (move,
2119 REG_EQUAL,
2120 gen_rtx_fmt_ee (MULT, int_mode,
2121 copy_rtx (op0),
2122 copy_rtx (op1)),
2123 target ? target : product);
2125 return product;
2129 /* Attempt to synthesize double word modulo or division by a constant divisor. */
2130 if ((binoptab == umod_optab
2131 || binoptab == smod_optab
2132 || binoptab == udiv_optab
2133 || binoptab == sdiv_optab)
2134 && optimize
2135 && CONST_INT_P (op1)
2136 && is_int_mode (mode, &int_mode)
2137 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2138 && optab_handler ((binoptab == umod_optab || binoptab == udiv_optab)
2139 ? udivmod_optab : sdivmod_optab,
2140 int_mode) == CODE_FOR_nothing
2141 && optab_handler (and_optab, word_mode) != CODE_FOR_nothing
2142 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing
2143 && optimize_insn_for_speed_p ())
2145 rtx res = NULL_RTX;
2146 if ((binoptab == umod_optab || binoptab == smod_optab)
2147 && (INTVAL (op1) & 1) == 0)
2148 res = expand_doubleword_mod (int_mode, op0, op1,
2149 binoptab == umod_optab);
2150 else
2152 rtx quot = expand_doubleword_divmod (int_mode, op0, op1, &res,
2153 binoptab == umod_optab
2154 || binoptab == udiv_optab);
2155 if (quot == NULL_RTX)
2156 res = NULL_RTX;
2157 else if (binoptab == udiv_optab || binoptab == sdiv_optab)
2158 res = quot;
2160 if (res != NULL_RTX)
2162 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
2164 rtx_insn *move = emit_move_insn (target ? target : res,
2165 res);
2166 set_dst_reg_note (move, REG_EQUAL,
2167 gen_rtx_fmt_ee (optab_to_code (binoptab),
2168 int_mode, copy_rtx (op0), op1),
2169 target ? target : res);
2171 return res;
2173 else
2174 delete_insns_since (last);
2177 /* It can't be open-coded in this mode.
2178 Use a library call if one is available and caller says that's ok. */
2180 libfunc = optab_libfunc (binoptab, mode);
2181 if (libfunc
2182 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2184 rtx_insn *insns;
2185 rtx op1x = op1;
2186 machine_mode op1_mode = mode;
2187 rtx value;
2189 start_sequence ();
2191 if (shift_optab_p (binoptab))
2193 op1_mode = targetm.libgcc_shift_count_mode ();
2194 /* Specify unsigned here,
2195 since negative shift counts are meaningless. */
2196 op1x = convert_to_mode (op1_mode, op1, 1);
2199 if (GET_MODE (op0) != VOIDmode
2200 && GET_MODE (op0) != mode)
2201 op0 = convert_to_mode (mode, op0, unsignedp);
2203 /* Pass 1 for NO_QUEUE so we don't lose any increments
2204 if the libcall is cse'd or moved. */
2205 value = emit_library_call_value (libfunc,
2206 NULL_RTX, LCT_CONST, mode,
2207 op0, mode, op1x, op1_mode);
2209 insns = get_insns ();
2210 end_sequence ();
2212 bool trapv = trapv_binoptab_p (binoptab);
2213 target = gen_reg_rtx (mode);
2214 emit_libcall_block_1 (insns, target, value,
2215 trapv ? NULL_RTX
2216 : gen_rtx_fmt_ee (optab_to_code (binoptab),
2217 mode, op0, op1), trapv);
2219 return target;
2222 delete_insns_since (last);
2224 /* It can't be done in this mode. Can we do it in a wider mode? */
2226 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2227 || methods == OPTAB_MUST_WIDEN))
2229 /* Caller says don't even try. */
2230 delete_insns_since (entry_last);
2231 return 0;
2234 /* Compute the value of METHODS to pass to recursive calls.
2235 Don't allow widening to be tried recursively. */
2237 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2239 /* Look for a wider mode of the same class for which it appears we can do
2240 the operation. */
2242 if (CLASS_HAS_WIDER_MODES_P (mclass))
2244 /* This code doesn't make sense for conversion optabs, since we
2245 wouldn't then want to extend the operands to be the same size
2246 as the result. */
2247 gcc_assert (!convert_optab_p (binoptab));
2248 FOR_EACH_WIDER_MODE (wider_mode, mode)
2250 if (optab_handler (binoptab, wider_mode)
2251 || (methods == OPTAB_LIB
2252 && optab_libfunc (binoptab, wider_mode)))
2254 rtx xop0 = op0, xop1 = op1;
2255 int no_extend = 0;
2257 /* For certain integer operations, we need not actually extend
2258 the narrow operands, as long as we will truncate
2259 the results to the same narrowness. */
2261 if ((binoptab == ior_optab || binoptab == and_optab
2262 || binoptab == xor_optab
2263 || binoptab == add_optab || binoptab == sub_optab
2264 || binoptab == smul_optab || binoptab == ashl_optab)
2265 && mclass == MODE_INT)
2266 no_extend = 1;
2268 xop0 = widen_operand (xop0, wider_mode, mode,
2269 unsignedp, no_extend);
2271 /* The second operand of a shift must always be extended. */
2272 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2273 no_extend && binoptab != ashl_optab);
2275 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2276 unsignedp, methods);
2277 if (temp)
2279 if (mclass != MODE_INT
2280 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2282 if (target == 0)
2283 target = gen_reg_rtx (mode);
2284 convert_move (target, temp, 0);
2285 return target;
2287 else
2288 return gen_lowpart (mode, temp);
2290 else
2291 delete_insns_since (last);
2296 delete_insns_since (entry_last);
2297 return 0;
2300 /* Expand a binary operator which has both signed and unsigned forms.
2301 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2302 signed operations.
2304 If we widen unsigned operands, we may use a signed wider operation instead
2305 of an unsigned wider operation, since the result would be the same. */
2308 sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
2309 rtx op0, rtx op1, rtx target, int unsignedp,
2310 enum optab_methods methods)
2312 rtx temp;
2313 optab direct_optab = unsignedp ? uoptab : soptab;
2314 bool save_enable;
2316 /* Do it without widening, if possible. */
2317 temp = expand_binop (mode, direct_optab, op0, op1, target,
2318 unsignedp, OPTAB_DIRECT);
2319 if (temp || methods == OPTAB_DIRECT)
2320 return temp;
2322 /* Try widening to a signed int. Disable any direct use of any
2323 signed insn in the current mode. */
2324 save_enable = swap_optab_enable (soptab, mode, false);
2326 temp = expand_binop (mode, soptab, op0, op1, target,
2327 unsignedp, OPTAB_WIDEN);
2329 /* For unsigned operands, try widening to an unsigned int. */
2330 if (!temp && unsignedp)
2331 temp = expand_binop (mode, uoptab, op0, op1, target,
2332 unsignedp, OPTAB_WIDEN);
2333 if (temp || methods == OPTAB_WIDEN)
2334 goto egress;
2336 /* Use the right width libcall if that exists. */
2337 temp = expand_binop (mode, direct_optab, op0, op1, target,
2338 unsignedp, OPTAB_LIB);
2339 if (temp || methods == OPTAB_LIB)
2340 goto egress;
2342 /* Must widen and use a libcall, use either signed or unsigned. */
2343 temp = expand_binop (mode, soptab, op0, op1, target,
2344 unsignedp, methods);
2345 if (!temp && unsignedp)
2346 temp = expand_binop (mode, uoptab, op0, op1, target,
2347 unsignedp, methods);
2349 egress:
2350 /* Undo the fiddling above. */
2351 if (save_enable)
2352 swap_optab_enable (soptab, mode, true);
2353 return temp;
2356 /* Generate code to perform an operation specified by UNOPPTAB
2357 on operand OP0, with two results to TARG0 and TARG1.
2358 We assume that the order of the operands for the instruction
2359 is TARG0, TARG1, OP0.
2361 Either TARG0 or TARG1 may be zero, but what that means is that
2362 the result is not actually wanted. We will generate it into
2363 a dummy pseudo-reg and discard it. They may not both be zero.
2365 Returns 1 if this operation can be performed; 0 if not. */
2368 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2369 int unsignedp)
2371 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2372 enum mode_class mclass;
2373 machine_mode wider_mode;
2374 rtx_insn *entry_last = get_last_insn ();
2375 rtx_insn *last;
2377 mclass = GET_MODE_CLASS (mode);
2379 if (!targ0)
2380 targ0 = gen_reg_rtx (mode);
2381 if (!targ1)
2382 targ1 = gen_reg_rtx (mode);
2384 /* Record where to go back to if we fail. */
2385 last = get_last_insn ();
2387 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2389 class expand_operand ops[3];
2390 enum insn_code icode = optab_handler (unoptab, mode);
2392 create_fixed_operand (&ops[0], targ0);
2393 create_fixed_operand (&ops[1], targ1);
2394 create_convert_operand_from (&ops[2], op0, mode, unsignedp);
2395 if (maybe_expand_insn (icode, 3, ops))
2396 return 1;
2399 /* It can't be done in this mode. Can we do it in a wider mode? */
2401 if (CLASS_HAS_WIDER_MODES_P (mclass))
2403 FOR_EACH_WIDER_MODE (wider_mode, mode)
2405 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2407 rtx t0 = gen_reg_rtx (wider_mode);
2408 rtx t1 = gen_reg_rtx (wider_mode);
2409 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2411 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2413 convert_move (targ0, t0, unsignedp);
2414 convert_move (targ1, t1, unsignedp);
2415 return 1;
2417 else
2418 delete_insns_since (last);
2423 delete_insns_since (entry_last);
2424 return 0;
2427 /* Generate code to perform an operation specified by BINOPTAB
2428 on operands OP0 and OP1, with two results to TARG0 and TARG1.
2429 We assume that the order of the operands for the instruction
2430 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2431 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2433 Either TARG0 or TARG1 may be zero, but what that means is that
2434 the result is not actually wanted. We will generate it into
2435 a dummy pseudo-reg and discard it. They may not both be zero.
2437 Returns 1 if this operation can be performed; 0 if not. */
2440 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2441 int unsignedp)
2443 machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2444 enum mode_class mclass;
2445 machine_mode wider_mode;
2446 rtx_insn *entry_last = get_last_insn ();
2447 rtx_insn *last;
2449 mclass = GET_MODE_CLASS (mode);
2451 if (!targ0)
2452 targ0 = gen_reg_rtx (mode);
2453 if (!targ1)
2454 targ1 = gen_reg_rtx (mode);
2456 /* Record where to go back to if we fail. */
2457 last = get_last_insn ();
2459 if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2461 class expand_operand ops[4];
2462 enum insn_code icode = optab_handler (binoptab, mode);
2463 machine_mode mode0 = insn_data[icode].operand[1].mode;
2464 machine_mode mode1 = insn_data[icode].operand[2].mode;
2465 rtx xop0 = op0, xop1 = op1;
2467 /* If we are optimizing, force expensive constants into a register. */
2468 xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2469 xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2471 create_fixed_operand (&ops[0], targ0);
2472 create_convert_operand_from (&ops[1], xop0, mode, unsignedp);
2473 create_convert_operand_from (&ops[2], xop1, mode, unsignedp);
2474 create_fixed_operand (&ops[3], targ1);
2475 if (maybe_expand_insn (icode, 4, ops))
2476 return 1;
2477 delete_insns_since (last);
2480 /* It can't be done in this mode. Can we do it in a wider mode? */
2482 if (CLASS_HAS_WIDER_MODES_P (mclass))
2484 FOR_EACH_WIDER_MODE (wider_mode, mode)
2486 if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2488 rtx t0 = gen_reg_rtx (wider_mode);
2489 rtx t1 = gen_reg_rtx (wider_mode);
2490 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2491 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2493 if (expand_twoval_binop (binoptab, cop0, cop1,
2494 t0, t1, unsignedp))
2496 convert_move (targ0, t0, unsignedp);
2497 convert_move (targ1, t1, unsignedp);
2498 return 1;
2500 else
2501 delete_insns_since (last);
2506 delete_insns_since (entry_last);
2507 return 0;
2510 /* Expand the two-valued library call indicated by BINOPTAB, but
2511 preserve only one of the values. If TARG0 is non-NULL, the first
2512 value is placed into TARG0; otherwise the second value is placed
2513 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2514 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2515 This routine assumes that the value returned by the library call is
2516 as if the return value was of an integral mode twice as wide as the
2517 mode of OP0. Returns 1 if the call was successful. */
2519 bool
2520 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2521 rtx targ0, rtx targ1, enum rtx_code code)
2523 machine_mode mode;
2524 machine_mode libval_mode;
2525 rtx libval;
2526 rtx_insn *insns;
2527 rtx libfunc;
2529 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2530 gcc_assert (!targ0 != !targ1);
2532 mode = GET_MODE (op0);
2533 libfunc = optab_libfunc (binoptab, mode);
2534 if (!libfunc)
2535 return false;
2537 /* The value returned by the library function will have twice as
2538 many bits as the nominal MODE. */
2539 libval_mode = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
2540 start_sequence ();
2541 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2542 libval_mode,
2543 op0, mode,
2544 op1, mode);
2545 /* Get the part of VAL containing the value that we want. */
2546 libval = simplify_gen_subreg (mode, libval, libval_mode,
2547 targ0 ? 0 : GET_MODE_SIZE (mode));
2548 insns = get_insns ();
2549 end_sequence ();
2550 /* Move the result into the desired location. */
2551 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2552 gen_rtx_fmt_ee (code, mode, op0, op1));
2554 return true;
2558 /* Wrapper around expand_unop which takes an rtx code to specify
2559 the operation to perform, not an optab pointer. All other
2560 arguments are the same. */
2562 expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
2563 rtx target, int unsignedp)
2565 optab unop = code_to_optab (code);
2566 gcc_assert (unop);
2568 return expand_unop (mode, unop, op0, target, unsignedp);
2571 /* Try calculating
2572 (clz:narrow x)
2573 as
2574 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2576 A similar operation can be used for clrsb. UNOPTAB says which operation
2577 we are trying to expand. */
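/* Worked example: to compute (clz:QI x) on a target that only has a
   32-bit clz, zero-extend x to SImode, take clz there, and subtract
   32 - 8 = 24.  For x = 0x10, clz32 (0x00000010) = 27, and 27 - 24 = 3,
   which is indeed the QImode leading-zero count of 0b00010000.  */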
2578 static rtx
2579 widen_leading (scalar_int_mode mode, rtx op0, rtx target, optab unoptab)
2581 opt_scalar_int_mode wider_mode_iter;
2582 FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2584 scalar_int_mode wider_mode = wider_mode_iter.require ();
2585 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2587 rtx xop0, temp;
2588 rtx_insn *last;
2590 last = get_last_insn ();
2592 if (target == 0)
2593 target = gen_reg_rtx (mode);
2594 xop0 = widen_operand (op0, wider_mode, mode,
2595 unoptab != clrsb_optab, false);
2596 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2597 unoptab != clrsb_optab);
2598 if (temp != 0)
2599 temp = expand_binop
2600 (wider_mode, sub_optab, temp,
2601 gen_int_mode (GET_MODE_PRECISION (wider_mode)
2602 - GET_MODE_PRECISION (mode),
2603 wider_mode),
2604 target, true, OPTAB_DIRECT);
2605 if (temp == 0)
2606 delete_insns_since (last);
2608 return temp;
2611 return 0;
2614 /* Attempt to emit (clrsb:mode op0) as
2615 (plus:mode (clz:mode (xor:mode op0 (ashr:mode op0 (const_int prec-1))))
2616 (const_int -1))
2617 if CLZ_DEFINED_VALUE_AT_ZERO (mode, val) is 2 and val is prec,
2618 or as
2619 (clz:mode (ior:mode (xor:mode (ashl:mode op0 (const_int 1))
2620 (ashr:mode op0 (const_int prec-1)))
2621 (const_int 1)))
2622 otherwise. */
2624 static rtx
2625 expand_clrsb_using_clz (scalar_int_mode mode, rtx op0, rtx target)
2627 if (optimize_insn_for_size_p ()
2628 || optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2629 return NULL_RTX;
2631 start_sequence ();
2632 HOST_WIDE_INT val = 0;
2633 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) != 2
2634 || val != GET_MODE_PRECISION (mode))
2635 val = 0;
2636 else
2637 val = 1;
2639 rtx temp2 = op0;
2640 if (!val)
2642 temp2 = expand_binop (mode, ashl_optab, op0, const1_rtx,
2643 NULL_RTX, 0, OPTAB_DIRECT);
2644 if (!temp2)
2646 fail:
2647 end_sequence ();
2648 return NULL_RTX;
2652 rtx temp = expand_binop (mode, ashr_optab, op0,
2653 GEN_INT (GET_MODE_PRECISION (mode) - 1),
2654 NULL_RTX, 0, OPTAB_DIRECT);
2655 if (!temp)
2656 goto fail;
2658 temp = expand_binop (mode, xor_optab, temp2, temp, NULL_RTX, 0,
2659 OPTAB_DIRECT);
2660 if (!temp)
2661 goto fail;
2663 if (!val)
2665 temp = expand_binop (mode, ior_optab, temp, const1_rtx,
2666 NULL_RTX, 0, OPTAB_DIRECT);
2667 if (!temp)
2668 goto fail;
2670 temp = expand_unop_direct (mode, clz_optab, temp, val ? NULL_RTX : target,
2671 true);
2672 if (!temp)
2673 goto fail;
2674 if (val)
2676 temp = expand_binop (mode, add_optab, temp, constm1_rtx,
2677 target, 0, OPTAB_DIRECT);
2678 if (!temp)
2679 goto fail;
2682 rtx_insn *seq = get_insns ();
2683 end_sequence ();
2685 add_equal_note (seq, temp, CLRSB, op0, NULL_RTX, mode);
2686 emit_insn (seq);
2687 return temp;
2690 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2691 quantities, choosing which based on whether the high word is nonzero. */
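/* That is, for a value split into words HI and LO,
     clz (HI:LO) = HI != 0 ? clz (HI) : BITS_PER_WORD + clz (LO),
   which is what the branchy sequence below implements.  */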
2692 static rtx
2693 expand_doubleword_clz (scalar_int_mode mode, rtx op0, rtx target)
2695 rtx xop0 = force_reg (mode, op0);
2696 rtx subhi = gen_highpart (word_mode, xop0);
2697 rtx sublo = gen_lowpart (word_mode, xop0);
2698 rtx_code_label *hi0_label = gen_label_rtx ();
2699 rtx_code_label *after_label = gen_label_rtx ();
2700 rtx_insn *seq;
2701 rtx temp, result;
2703 /* If we were not given a target, use a word_mode register, not a
2704 'mode' register. The result will fit, and nobody is expecting
2705 anything bigger (the return type of __builtin_clz* is int). */
2706 if (!target)
2707 target = gen_reg_rtx (word_mode);
2709 /* In any case, write to a word_mode scratch in both branches of the
2710 conditional, so we can ensure there is a single move insn setting
2711 'target' to tag a REG_EQUAL note on. */
2712 result = gen_reg_rtx (word_mode);
2714 start_sequence ();
2716 /* If the high word is not equal to zero,
2717 then clz of the full value is clz of the high word. */
2718 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2719 word_mode, true, hi0_label);
2721 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2722 if (!temp)
2723 goto fail;
2725 if (temp != result)
2726 convert_move (result, temp, true);
2728 emit_jump_insn (targetm.gen_jump (after_label));
2729 emit_barrier ();
2731 /* Else clz of the full value is clz of the low word plus the number
2732 of bits in the high word. */
2733 emit_label (hi0_label);
2735 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2736 if (!temp)
2737 goto fail;
2738 temp = expand_binop (word_mode, add_optab, temp,
2739 gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
2740 result, true, OPTAB_DIRECT);
2741 if (!temp)
2742 goto fail;
2743 if (temp != result)
2744 convert_move (result, temp, true);
2746 emit_label (after_label);
2747 convert_move (target, result, true);
2749 seq = get_insns ();
2750 end_sequence ();
2752 add_equal_note (seq, target, CLZ, xop0, NULL_RTX, mode);
2753 emit_insn (seq);
2754 return target;
2756 fail:
2757 end_sequence ();
2758 return 0;
2761 /* Try calculating popcount of a double-word quantity as two popcount's of
2762 word-sized quantities and summing up the results. */
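/* Since population count is purely bitwise,
     popcount (HI:LO) = popcount (HI) + popcount (LO),
   with no interaction between the two words.  */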
2763 static rtx
2764 expand_doubleword_popcount (scalar_int_mode mode, rtx op0, rtx target)
2766 rtx t0, t1, t;
2767 rtx_insn *seq;
2769 start_sequence ();
2771 t0 = expand_unop_direct (word_mode, popcount_optab,
2772 operand_subword_force (op0, 0, mode), NULL_RTX,
2773 true);
2774 t1 = expand_unop_direct (word_mode, popcount_optab,
2775 operand_subword_force (op0, 1, mode), NULL_RTX,
2776 true);
2777 if (!t0 || !t1)
2779 end_sequence ();
2780 return NULL_RTX;
2783 /* If we were not given a target, use a word_mode register, not a
2784 'mode' register. The result will fit, and nobody is expecting
2785 anything bigger (the return type of __builtin_popcount* is int). */
2786 if (!target)
2787 target = gen_reg_rtx (word_mode);
2789 t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);
2791 seq = get_insns ();
2792 end_sequence ();
2794 add_equal_note (seq, t, POPCOUNT, op0, NULL_RTX, mode);
2795 emit_insn (seq);
2796 return t;
2799 /* Try calculating
2800 (parity:wide x)
2801 as
2802 (parity:narrow (low (x) ^ high (x))) */
2803 static rtx
2804 expand_doubleword_parity (scalar_int_mode mode, rtx op0, rtx target)
2806 rtx t = expand_binop (word_mode, xor_optab,
2807 operand_subword_force (op0, 0, mode),
2808 operand_subword_force (op0, 1, mode),
2809 NULL_RTX, 0, OPTAB_DIRECT);
2810 return expand_unop (word_mode, parity_optab, t, target, true);
2813 /* Try calculating
2814 (bswap:narrow x)
2815 as
2816 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
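/* Worked example: bswap of the HImode value 0x1234 via a 32-bit bswap
   is bswap32 (0x00001234) = 0x34120000, and shifting right by
   32 - 16 = 16 bits leaves 0x3412, the byte-swapped HImode value.  */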
2817 static rtx
2818 widen_bswap (scalar_int_mode mode, rtx op0, rtx target)
2820 rtx x;
2821 rtx_insn *last;
2822 opt_scalar_int_mode wider_mode_iter;
2824 FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2825 if (optab_handler (bswap_optab, wider_mode_iter.require ())
2826 != CODE_FOR_nothing)
2827 break;
2829 if (!wider_mode_iter.exists ())
2830 return NULL_RTX;
2832 scalar_int_mode wider_mode = wider_mode_iter.require ();
2833 last = get_last_insn ();
2835 x = widen_operand (op0, wider_mode, mode, true, true);
2836 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2838 gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2839 && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2840 if (x != 0)
2841 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2842 GET_MODE_BITSIZE (wider_mode)
2843 - GET_MODE_BITSIZE (mode),
2844 NULL_RTX, true);
2846 if (x != 0)
2848 if (target == 0)
2849 target = gen_reg_rtx (mode);
2850 emit_move_insn (target, gen_lowpart (mode, x));
2852 else
2853 delete_insns_since (last);
2855 return target;
2858 /* Try calculating bswap as two bswaps of two word-sized operands. */
2860 static rtx
2861 expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
2863 rtx t0, t1;
2865 t1 = expand_unop (word_mode, bswap_optab,
2866 operand_subword_force (op, 0, mode), NULL_RTX, true);
2867 t0 = expand_unop (word_mode, bswap_optab,
2868 operand_subword_force (op, 1, mode), NULL_RTX, true);
2870 if (target == 0 || !valid_multiword_target_p (target))
2871 target = gen_reg_rtx (mode);
2872 if (REG_P (target))
2873 emit_clobber (target);
2874 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2875 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2877 return target;
2880 /* Try calculating (parity x) as (and (popcount x) 1), where
2881 popcount can also be done in a wider mode. */
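/* For example, 0x6 has popcount 2, so its parity is 2 & 1 = 0; 0x7 has
   popcount 3 and parity 3 & 1 = 1.  Zero-extending into the wider mode
   is safe here because the extra zero bits do not change the popcount.  */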
2882 static rtx
2883 expand_parity (scalar_int_mode mode, rtx op0, rtx target)
2885 enum mode_class mclass = GET_MODE_CLASS (mode);
2886 opt_scalar_int_mode wider_mode_iter;
2887 FOR_EACH_MODE_FROM (wider_mode_iter, mode)
2889 scalar_int_mode wider_mode = wider_mode_iter.require ();
2890 if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2892 rtx xop0, temp;
2893 rtx_insn *last;
2895 last = get_last_insn ();
2897 if (target == 0 || GET_MODE (target) != wider_mode)
2898 target = gen_reg_rtx (wider_mode);
2900 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2901 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2902 true);
2903 if (temp != 0)
2904 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2905 target, true, OPTAB_DIRECT);
2907 if (temp)
2909 if (mclass != MODE_INT
2910 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2911 return convert_to_mode (mode, temp, 0);
2912 else
2913 return gen_lowpart (mode, temp);
2915 else
2916 delete_insns_since (last);
2919 return 0;
2922 /* Try calculating ctz(x) as K - clz(x & -x),
2923 where K is GET_MODE_PRECISION(mode) - 1.
2925 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2926 don't have to worry about what the hardware does in that case. (If
2927 the clz instruction produces the usual value at 0, which is K, the
2928 result of this code sequence will be -1; expand_ffs, below, relies
2929 on this. It might be nice to have it be K instead, for consistency
2930 with the (very few) processors that provide a ctz with a defined
2931 value, but that would take one more instruction, and it would be
2932 less convenient for expand_ffs anyway.) */
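/* Worked example in SImode: for x = 0b1000, x & -x isolates the lowest
   set bit, giving 0b1000; clz of that is 28, and 31 - 28 = 3, the
   correct count of trailing zeros.  */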
2934 static rtx
2935 expand_ctz (scalar_int_mode mode, rtx op0, rtx target)
2937 rtx_insn *seq;
2938 rtx temp;
2940 if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2941 return 0;
2943 start_sequence ();
2945 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2946 if (temp)
2947 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2948 true, OPTAB_DIRECT);
2949 if (temp)
2950 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2951 if (temp)
2952 temp = expand_binop (mode, sub_optab,
2953 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
2954 temp, target,
2955 true, OPTAB_DIRECT);
2956 if (temp == 0)
2958 end_sequence ();
2959 return 0;
2962 seq = get_insns ();
2963 end_sequence ();
2965 add_equal_note (seq, temp, CTZ, op0, NULL_RTX, mode);
2966 emit_insn (seq);
2967 return temp;
2971 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2972 else with the clz-based sequence used by expand_ctz.
2974 The ffs builtin promises to return zero for a zero value and ctz/clz
2975 may have an undefined value in that case. If they do not give us a
2976 convenient value, we have to generate a test and branch. */
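/* In other words, ffs (x) = ctz (x) + 1 for nonzero x, with ffs (0)
   defined as 0; if ctz at zero does not already yield -1, the code
   below patches the zero case with a compare-and-branch before adding
   the final 1.  */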
2977 static rtx
2978 expand_ffs (scalar_int_mode mode, rtx op0, rtx target)
2980 HOST_WIDE_INT val = 0;
2981 bool defined_at_zero = false;
2982 rtx temp;
2983 rtx_insn *seq;
2985 if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2987 start_sequence ();
2989 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2990 if (!temp)
2991 goto fail;
2993 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2995 else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2997 start_sequence ();
2998 temp = expand_ctz (mode, op0, 0);
2999 if (!temp)
3000 goto fail;
3002 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
3004 defined_at_zero = true;
3005 val = (GET_MODE_PRECISION (mode) - 1) - val;
3008 else
3009 return 0;
3011 if (defined_at_zero && val == -1)
3012 /* No correction needed at zero. */;
3013 else
3015 /* We don't try to do anything clever with the situation found
3016 on some processors (e.g. Alpha) where ctz(0:mode) ==
3017 bitsize(mode). If someone can think of a way to send N to -1
3018 and leave alone all values in the range 0..N-1 (where N is a
3019 power of two), cheaper than this test-and-branch, please add it.
3021 The test-and-branch is done after the operation itself, in case
3022 the operation sets condition codes that can be recycled for this.
3023 (This is true on i386, for instance.) */
3025 rtx_code_label *nonzero_label = gen_label_rtx ();
3026 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
3027 mode, true, nonzero_label);
3029 convert_move (temp, GEN_INT (-1), false);
3030 emit_label (nonzero_label);
3033 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
3034 to produce a value in the range 0..bitsize. */
3035 temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
3036 target, false, OPTAB_DIRECT);
3037 if (!temp)
3038 goto fail;
3040 seq = get_insns ();
3041 end_sequence ();
3043 add_equal_note (seq, temp, FFS, op0, NULL_RTX, mode);
3044 emit_insn (seq);
3045 return temp;
3047 fail:
3048 end_sequence ();
3049 return 0;
3052 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
3053 conditions, VAL may already be a SUBREG against which we cannot generate
3054 a further SUBREG. In this case, we expect forcing the value into a
3055 register will work around the situation. */
3057 static rtx
3058 lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
3059 machine_mode imode)
3061 rtx ret;
3062 ret = lowpart_subreg (omode, val, imode);
3063 if (ret == NULL)
3065 val = force_reg (imode, val);
3066 ret = lowpart_subreg (omode, val, imode);
3067 gcc_assert (ret != NULL);
3069 return ret;
3072 /* Expand a floating point absolute value or negation operation via a
3073 logical operation on the sign bit. */
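/* For IEEE single precision, whose sign occupies the top bit, this is
     neg (x) = x ^ 0x80000000   (flip the sign bit)
     abs (x) = x & 0x7fffffff   (clear the sign bit)
   the code below derives the bit position from REAL_MODE_FORMAT so it
   works for any format with a simple sign bit.  */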
3075 static rtx
3076 expand_absneg_bit (enum rtx_code code, scalar_float_mode mode,
3077 rtx op0, rtx target)
3079 const struct real_format *fmt;
3080 int bitpos, word, nwords, i;
3081 scalar_int_mode imode;
3082 rtx temp;
3083 rtx_insn *insns;
3085 /* The format has to have a simple sign bit. */
3086 fmt = REAL_MODE_FORMAT (mode);
3087 if (fmt == NULL)
3088 return NULL_RTX;
3090 bitpos = fmt->signbit_rw;
3091 if (bitpos < 0)
3092 return NULL_RTX;
3094 /* Don't create negative zeros if the format doesn't support them. */
3095 if (code == NEG && !fmt->has_signed_zero)
3096 return NULL_RTX;
3098 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3100 if (!int_mode_for_mode (mode).exists (&imode))
3101 return NULL_RTX;
3102 word = 0;
3103 nwords = 1;
3105 else
3107 imode = word_mode;
3109 if (FLOAT_WORDS_BIG_ENDIAN)
3110 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3111 else
3112 word = bitpos / BITS_PER_WORD;
3113 bitpos = bitpos % BITS_PER_WORD;
3114 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3117 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3118 if (code == ABS)
3119 mask = ~mask;
3121 if (target == 0
3122 || target == op0
3123 || reg_overlap_mentioned_p (target, op0)
3124 || (nwords > 1 && !valid_multiword_target_p (target)))
3125 target = gen_reg_rtx (mode);
3127 if (nwords > 1)
3129 start_sequence ();
3131 for (i = 0; i < nwords; ++i)
3133 rtx targ_piece = operand_subword (target, i, 1, mode);
3134 rtx op0_piece = operand_subword_force (op0, i, mode);
3136 if (i == word)
3138 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3139 op0_piece,
3140 immed_wide_int_const (mask, imode),
3141 targ_piece, 1, OPTAB_LIB_WIDEN);
3142 if (temp != targ_piece)
3143 emit_move_insn (targ_piece, temp);
3145 else
3146 emit_move_insn (targ_piece, op0_piece);
3149 insns = get_insns ();
3150 end_sequence ();
3152 emit_insn (insns);
3154 else
3156 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3157 gen_lowpart (imode, op0),
3158 immed_wide_int_const (mask, imode),
3159 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3160 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3162 set_dst_reg_note (get_last_insn (), REG_EQUAL,
3163 gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
3164 target);
3167 return target;
3170 /* As expand_unop, but will fail rather than attempt the operation in a
3171 different mode or with a libcall. */
3172 static rtx
3173 expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
3174 int unsignedp)
3176 if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
3178 class expand_operand ops[2];
3179 enum insn_code icode = optab_handler (unoptab, mode);
3180 rtx_insn *last = get_last_insn ();
3181 rtx_insn *pat;
3183 create_output_operand (&ops[0], target, mode);
3184 create_convert_operand_from (&ops[1], op0, mode, unsignedp);
3185 pat = maybe_gen_insn (icode, 2, ops);
3186 if (pat)
3188 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3189 && ! add_equal_note (pat, ops[0].value,
3190 optab_to_code (unoptab),
3191 ops[1].value, NULL_RTX, mode))
3193 delete_insns_since (last);
3194 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
3197 emit_insn (pat);
3199 return ops[0].value;
3202 return 0;
3205 /* Generate code to perform an operation specified by UNOPTAB
3206 on operand OP0, with result having machine-mode MODE.
3208 UNSIGNEDP is for the case where we have to widen the operands
3209 to perform the operation. It says to use zero-extension.
3211 If TARGET is nonzero, the value
3212 is generated there, if it is convenient to do so.
3213 In all cases an rtx is returned for the locus of the value;
3214 this may or may not be TARGET. */
3217 expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
3218 int unsignedp)
3220 enum mode_class mclass = GET_MODE_CLASS (mode);
3221 machine_mode wider_mode;
3222 scalar_int_mode int_mode;
3223 scalar_float_mode float_mode;
3224 rtx temp;
3225 rtx libfunc;
3227 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
3228 if (temp)
3229 return temp;
3231 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3233 /* Widening (or narrowing) clz needs special treatment. */
3234 if (unoptab == clz_optab)
3236 if (is_a <scalar_int_mode> (mode, &int_mode))
3238 temp = widen_leading (int_mode, op0, target, unoptab);
3239 if (temp)
3240 return temp;
3242 if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
3243 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3245 temp = expand_doubleword_clz (int_mode, op0, target);
3246 if (temp)
3247 return temp;
3251 goto try_libcall;
3254 if (unoptab == clrsb_optab)
3256 if (is_a <scalar_int_mode> (mode, &int_mode))
3258 temp = widen_leading (int_mode, op0, target, unoptab);
3259 if (temp)
3260 return temp;
3261 temp = expand_clrsb_using_clz (int_mode, op0, target);
3262 if (temp)
3263 return temp;
3265 goto try_libcall;
3268 if (unoptab == popcount_optab
3269 && is_a <scalar_int_mode> (mode, &int_mode)
3270 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
3271 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
3272 && optimize_insn_for_speed_p ())
3274 temp = expand_doubleword_popcount (int_mode, op0, target);
3275 if (temp)
3276 return temp;
3279 if (unoptab == parity_optab
3280 && is_a <scalar_int_mode> (mode, &int_mode)
3281 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
3282 && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
3283 || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
3284 && optimize_insn_for_speed_p ())
3286 temp = expand_doubleword_parity (int_mode, op0, target);
3287 if (temp)
3288 return temp;
3291 /* Widening (or narrowing) bswap needs special treatment. */
3292 if (unoptab == bswap_optab)
3294 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
3295 or ROTATERT. First try these directly; if this fails, then try the
3296 obvious pair of shifts with allowed widening, as this will probably
3297 always be more efficient than the other fallback methods. */
3298 if (mode == HImode)
3300 rtx_insn *last;
3301 rtx temp1, temp2;
3303 if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
3305 temp = expand_binop (mode, rotl_optab, op0,
3306 gen_int_shift_amount (mode, 8),
3307 target, unsignedp, OPTAB_DIRECT);
3308 if (temp)
3309 return temp;
3312 if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
3314 temp = expand_binop (mode, rotr_optab, op0,
3315 gen_int_shift_amount (mode, 8),
3316 target, unsignedp, OPTAB_DIRECT);
3317 if (temp)
3318 return temp;
3321 last = get_last_insn ();
3323 temp1 = expand_binop (mode, ashl_optab, op0,
3324 gen_int_shift_amount (mode, 8), NULL_RTX,
3325 unsignedp, OPTAB_WIDEN);
3326 temp2 = expand_binop (mode, lshr_optab, op0,
3327 gen_int_shift_amount (mode, 8), NULL_RTX,
3328 unsignedp, OPTAB_WIDEN);
3329 if (temp1 && temp2)
3331 temp = expand_binop (mode, ior_optab, temp1, temp2, target,
3332 unsignedp, OPTAB_WIDEN);
3333 if (temp)
3334 return temp;
3337 delete_insns_since (last);
3340 if (is_a <scalar_int_mode> (mode, &int_mode))
3342 temp = widen_bswap (int_mode, op0, target);
3343 if (temp)
3344 return temp;
3346 /* We do not provide a 128-bit bswap in libgcc so force the use of
3347 a double bswap for 64-bit targets. */
3348 if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
3349 && (UNITS_PER_WORD == 8
3350 || optab_handler (unoptab, word_mode) != CODE_FOR_nothing))
3352 temp = expand_doubleword_bswap (mode, op0, target);
3353 if (temp)
3354 return temp;
3358 goto try_libcall;
3361 if (CLASS_HAS_WIDER_MODES_P (mclass))
3362 FOR_EACH_WIDER_MODE (wider_mode, mode)
3364 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
3366 rtx xop0 = op0;
3367 rtx_insn *last = get_last_insn ();
3369 /* For certain operations, we need not actually extend
3370 the narrow operand, as long as we will truncate the
3371 results to the same narrowness. */
3373 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3374 (unoptab == neg_optab
3375 || unoptab == one_cmpl_optab)
3376 && mclass == MODE_INT);
3378 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3379 unsignedp);
3381 if (temp)
3383 if (mclass != MODE_INT
3384 || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
3386 if (target == 0)
3387 target = gen_reg_rtx (mode);
3388 convert_move (target, temp, 0);
3389 return target;
3391 else
3392 return gen_lowpart (mode, temp);
3394 else
3395 delete_insns_since (last);
3399 /* These can be done a word at a time. */
3400 if (unoptab == one_cmpl_optab
3401 && is_int_mode (mode, &int_mode)
3402 && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
3403 && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
3405 int i;
3406 rtx_insn *insns;
3408 if (target == 0
3409 || target == op0
3410 || reg_overlap_mentioned_p (target, op0)
3411 || !valid_multiword_target_p (target))
3412 target = gen_reg_rtx (int_mode);
3414 start_sequence ();
3416 /* Do the actual arithmetic. */
3417 for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
3419 rtx target_piece = operand_subword (target, i, 1, int_mode);
3420 rtx x = expand_unop (word_mode, unoptab,
3421 operand_subword_force (op0, i, int_mode),
3422 target_piece, unsignedp);
3424 if (target_piece != x)
3425 emit_move_insn (target_piece, x);
3428 insns = get_insns ();
3429 end_sequence ();
3431 emit_insn (insns);
3432 return target;
3435 /* Emit ~op0 as op0 ^ -1. */
3436 if (unoptab == one_cmpl_optab
3437 && (SCALAR_INT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
3438 && optab_handler (xor_optab, mode) != CODE_FOR_nothing)
3440 temp = expand_binop (mode, xor_optab, op0, CONSTM1_RTX (mode),
3441 target, unsignedp, OPTAB_DIRECT);
3442 if (temp)
3443 return temp;
3446 if (optab_to_code (unoptab) == NEG)
3448 /* Try negating floating point values by flipping the sign bit. */
3449 if (is_a <scalar_float_mode> (mode, &float_mode))
3451 temp = expand_absneg_bit (NEG, float_mode, op0, target);
3452 if (temp)
3453 return temp;
3456 /* If there is no negation pattern, and we have no negative zero,
3457 try subtracting from zero. */
3458 if (!HONOR_SIGNED_ZEROS (mode))
3460 temp = expand_binop (mode, (unoptab == negv_optab
3461 ? subv_optab : sub_optab),
3462 CONST0_RTX (mode), op0, target,
3463 unsignedp, OPTAB_DIRECT);
3464 if (temp)
3465 return temp;
3469 /* Try calculating parity (x) as popcount (x) % 2. */
3470 if (unoptab == parity_optab && is_a <scalar_int_mode> (mode, &int_mode))
3472 temp = expand_parity (int_mode, op0, target);
3473 if (temp)
3474 return temp;
3477 /* Try implementing ffs (x) in terms of clz (x). */
3478 if (unoptab == ffs_optab && is_a <scalar_int_mode> (mode, &int_mode))
3480 temp = expand_ffs (int_mode, op0, target);
3481 if (temp)
3482 return temp;
3485 /* Try implementing ctz (x) in terms of clz (x). */
3486 if (unoptab == ctz_optab && is_a <scalar_int_mode> (mode, &int_mode))
3488 temp = expand_ctz (int_mode, op0, target);
3489 if (temp)
3490 return temp;
3493 try_libcall:
3494 /* Now try a library call in this mode. */
3495 libfunc = optab_libfunc (unoptab, mode);
3496 if (libfunc)
3498 rtx_insn *insns;
3499 rtx value;
3500 rtx eq_value;
3501 machine_mode outmode = mode;
3503 /* All of these functions return small values. Thus we choose to
3504 have them return something that isn't a double-word. */
3505 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3506 || unoptab == clrsb_optab || unoptab == popcount_optab
3507 || unoptab == parity_optab)
3508 outmode
3509 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
3510 optab_libfunc (unoptab, mode)));
3512 start_sequence ();
3514 /* Pass 1 for NO_QUEUE so we don't lose any increments
3515 if the libcall is cse'd or moved. */
3516 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3517 op0, mode);
3518 insns = get_insns ();
3519 end_sequence ();
3521 target = gen_reg_rtx (outmode);
3522 bool trapv = trapv_unoptab_p (unoptab);
3523 if (trapv)
3524 eq_value = NULL_RTX;
3525 else
3527 eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
3528 if (GET_MODE_UNIT_SIZE (outmode) < GET_MODE_UNIT_SIZE (mode))
3529 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3530 else if (GET_MODE_UNIT_SIZE (outmode) > GET_MODE_UNIT_SIZE (mode))
3531 eq_value = simplify_gen_unary (ZERO_EXTEND,
3532 outmode, eq_value, mode);
3534 emit_libcall_block_1 (insns, target, value, eq_value, trapv);
3536 return target;
3539 /* It can't be done in this mode. Can we do it in a wider mode? */
3541 if (CLASS_HAS_WIDER_MODES_P (mclass))
3543 FOR_EACH_WIDER_MODE (wider_mode, mode)
3545 if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
3546 || optab_libfunc (unoptab, wider_mode))
3548 rtx xop0 = op0;
3549 rtx_insn *last = get_last_insn ();
3551 /* For certain operations, we need not actually extend
3552 the narrow operand, as long as we will truncate the
3553 results to the same narrowness. */
3554 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3555 (unoptab == neg_optab
3556 || unoptab == one_cmpl_optab
3557 || unoptab == bswap_optab)
3558 && mclass == MODE_INT);
3560 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3561 unsignedp);
3563 /* If we are generating clz using wider mode, adjust the
3564 result. Similarly for clrsb. */
3565 if ((unoptab == clz_optab || unoptab == clrsb_optab)
3566 && temp != 0)
3568 scalar_int_mode wider_int_mode
3569 = as_a <scalar_int_mode> (wider_mode);
3570 int_mode = as_a <scalar_int_mode> (mode);
3571 temp = expand_binop
3572 (wider_mode, sub_optab, temp,
3573 gen_int_mode (GET_MODE_PRECISION (wider_int_mode)
3574 - GET_MODE_PRECISION (int_mode),
3575 wider_int_mode),
3576 target, true, OPTAB_DIRECT);
3579 /* Likewise for bswap. */
3580 if (unoptab == bswap_optab && temp != 0)
3582 scalar_int_mode wider_int_mode
3583 = as_a <scalar_int_mode> (wider_mode);
3584 int_mode = as_a <scalar_int_mode> (mode);
3585 gcc_assert (GET_MODE_PRECISION (wider_int_mode)
3586 == GET_MODE_BITSIZE (wider_int_mode)
3587 && GET_MODE_PRECISION (int_mode)
3588 == GET_MODE_BITSIZE (int_mode));
3590 temp = expand_shift (RSHIFT_EXPR, wider_int_mode, temp,
3591 GET_MODE_BITSIZE (wider_int_mode)
3592 - GET_MODE_BITSIZE (int_mode),
3593 NULL_RTX, true);
3596 if (temp)
3598 if (mclass != MODE_INT)
3600 if (target == 0)
3601 target = gen_reg_rtx (mode);
3602 convert_move (target, temp, 0);
3603 return target;
3605 else
3606 return gen_lowpart (mode, temp);
3608 else
3609 delete_insns_since (last);
3614 /* One final attempt at implementing negation via subtraction,
3615 this time allowing widening of the operand. */
3616 if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
3618 rtx temp;
3619 temp = expand_binop (mode,
3620 unoptab == negv_optab ? subv_optab : sub_optab,
3621 CONST0_RTX (mode), op0,
3622 target, unsignedp, OPTAB_LIB_WIDEN);
3623 if (temp)
3624 return temp;
3627 return 0;
3630 /* Emit code to compute the absolute value of OP0, with result to
3631 TARGET if convenient. (TARGET may be 0.) The return value says
3632 where the result actually is to be found.
3634 MODE is the mode of the operand; the mode of the result is
3635 different but can be deduced from MODE.
3640 expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
3641 int result_unsignedp)
3643 rtx temp;
3645 if (GET_MODE_CLASS (mode) != MODE_INT
3646 || ! flag_trapv)
3647 result_unsignedp = 1;
3649 /* First try to do it with a special abs instruction. */
3650 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3651 op0, target, 0);
3652 if (temp != 0)
3653 return temp;
3655 /* For floating point modes, try clearing the sign bit. */
3656 scalar_float_mode float_mode;
3657 if (is_a <scalar_float_mode> (mode, &float_mode))
3659 temp = expand_absneg_bit (ABS, float_mode, op0, target);
3660 if (temp)
3661 return temp;
3664 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3665 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3666 && !HONOR_SIGNED_ZEROS (mode))
3668 rtx_insn *last = get_last_insn ();
3670 temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3671 op0, NULL_RTX, 0);
3672 if (temp != 0)
3673 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3674 OPTAB_WIDEN);
3676 if (temp != 0)
3677 return temp;
3679 delete_insns_since (last);
3682 /* If this machine has expensive jumps, we can do integer absolute
3683 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3684 where W is the width of MODE. */
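/* Worked example (illustrative, with W = 8): for x = -5, the
   arithmetic shift gives s = x >> 7 = -1 (all ones), so
   (x ^ s) - s = 4 - (-1) = 5; for x >= 0, s is 0 and the whole
   expression reduces to x.  */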
3686 scalar_int_mode int_mode;
3687 if (is_int_mode (mode, &int_mode)
3688 && BRANCH_COST (optimize_insn_for_speed_p (),
3689 false) >= 2)
3691 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3692 GET_MODE_PRECISION (int_mode) - 1,
3693 NULL_RTX, 0);
3695 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3696 OPTAB_LIB_WIDEN);
3697 if (temp != 0)
3698 temp = expand_binop (int_mode,
3699 result_unsignedp ? sub_optab : subv_optab,
3700 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3702 if (temp != 0)
3703 return temp;
3706 return NULL_RTX;
3710 expand_abs (machine_mode mode, rtx op0, rtx target,
3711 int result_unsignedp, int safe)
3713 rtx temp;
3714 rtx_code_label *op1;
3716 if (GET_MODE_CLASS (mode) != MODE_INT
3717 || ! flag_trapv)
3718 result_unsignedp = 1;
3720 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3721 if (temp != 0)
3722 return temp;
3724 /* If that does not win, use conditional jump and negate. */
3726 /* It is safe to use the target if it is the same
3727 as the source, provided it is also a pseudo register. */
3728 if (op0 == target && REG_P (op0)
3729 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3730 safe = 1;
3732 op1 = gen_label_rtx ();
3733 if (target == 0 || ! safe
3734 || GET_MODE (target) != mode
3735 || (MEM_P (target) && MEM_VOLATILE_P (target))
3736 || (REG_P (target)
3737 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3738 target = gen_reg_rtx (mode);
3740 emit_move_insn (target, op0);
3741 NO_DEFER_POP;
3743 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3744 NULL_RTX, NULL, op1,
3745 profile_probability::uninitialized ());
3747 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3748 target, target, 0);
3749 if (op0 != target)
3750 emit_move_insn (target, op0);
3751 emit_label (op1);
3752 OK_DEFER_POP;
3753 return target;
3756 /* Emit code to compute the one's complement absolute value of OP0
3757 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3758 (TARGET may be NULL_RTX.) The return value says where the result
3759 actually is to be found.
3761 MODE is the mode of the operand; the result is produced in
3762 the same mode. */
3765 expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
3767 rtx temp;
3769 /* Not applicable for floating point modes. */
3770 if (FLOAT_MODE_P (mode))
3771 return NULL_RTX;
3773 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3774 if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3776 rtx_insn *last = get_last_insn ();
3778 temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3779 if (temp != 0)
3780 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3781 OPTAB_WIDEN);
3783 if (temp != 0)
3784 return temp;
3786 delete_insns_since (last);
3789 /* If this machine has expensive jumps, we can do one's complement
3790 absolute value of X as (((signed) x >> (W-1)) ^ x). */
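/* Worked example (illustrative, with W = 8): for x = -5,
   s = x >> 7 = -1 and x ^ s = ~x = 4, the one's complement;
   for x >= 0, s is 0 and x ^ s leaves x unchanged.  */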
3792 scalar_int_mode int_mode;
3793 if (is_int_mode (mode, &int_mode)
3794 && BRANCH_COST (optimize_insn_for_speed_p (),
3795 false) >= 2)
3797 rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3798 GET_MODE_PRECISION (int_mode) - 1,
3799 NULL_RTX, 0);
3801 temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3802 OPTAB_LIB_WIDEN);
3804 if (temp != 0)
3805 return temp;
3808 return NULL_RTX;
3811 /* A subroutine of expand_copysign, perform the copysign operation using the
3812 abs and neg primitives advertised to exist on the target. The assumption
3813 is that we have a split register file, and leaving op0 in fp registers,
3814 and not playing with subregs so much, will help the register allocator. */
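/* In outline, the strategy below is: extract OP1's sign (via the
   signbit optab when available, otherwise by masking the sign bit
   of its integer image), take ABS (OP0) unless it is already known
   nonnegative, then conditionally negate the result when the sign
   bit of OP1 was set.  */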
3816 static rtx
3817 expand_copysign_absneg (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3818 int bitpos, bool op0_is_abs)
3820 scalar_int_mode imode;
3821 enum insn_code icode;
3822 rtx sign;
3823 rtx_code_label *label;
3825 if (target == op1)
3826 target = NULL_RTX;
3828 /* Check if the back end provides an insn that handles signbit for the
3829 argument's mode. */
3830 icode = optab_handler (signbit_optab, mode);
3831 if (icode != CODE_FOR_nothing)
3833 imode = as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
3834 sign = gen_reg_rtx (imode);
3835 emit_unop_insn (icode, sign, op1, UNKNOWN);
3837 else
3839 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3841 if (!int_mode_for_mode (mode).exists (&imode))
3842 return NULL_RTX;
3843 op1 = gen_lowpart (imode, op1);
3845 else
3847 int word;
3849 imode = word_mode;
3850 if (FLOAT_WORDS_BIG_ENDIAN)
3851 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3852 else
3853 word = bitpos / BITS_PER_WORD;
3854 bitpos = bitpos % BITS_PER_WORD;
3855 op1 = operand_subword_force (op1, word, mode);
3858 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3859 sign = expand_binop (imode, and_optab, op1,
3860 immed_wide_int_const (mask, imode),
3861 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3864 if (!op0_is_abs)
3866 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3867 if (op0 == NULL)
3868 return NULL_RTX;
3869 target = op0;
3871 else
3873 if (target == NULL_RTX)
3874 target = copy_to_reg (op0);
3875 else
3876 emit_move_insn (target, op0);
3879 label = gen_label_rtx ();
3880 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3882 if (CONST_DOUBLE_AS_FLOAT_P (op0))
3883 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3884 else
3885 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3886 if (op0 != target)
3887 emit_move_insn (target, op0);
3889 emit_label (label);
3891 return target;
3895 /* A subroutine of expand_copysign, perform the entire copysign operation
3896 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3897 is true if op0 is known to have its sign bit clear. */
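/* The computation below is, in effect,
   (op0 & ~mask) | (op1 & mask) with MASK holding only the sign bit,
   applied word by word for multiword modes.  Illustrative example
   for IEEE single precision: copysign (1.0f, -2.0f) ORs the sign
   bit of 0xc0000000 into 0x3f800000, yielding 0xbf800000 == -1.0f.  */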
3899 static rtx
3900 expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3901 int bitpos, bool op0_is_abs)
3903 scalar_int_mode imode;
3904 int word, nwords, i;
3905 rtx temp;
3906 rtx_insn *insns;
3908 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3910 if (!int_mode_for_mode (mode).exists (&imode))
3911 return NULL_RTX;
3912 word = 0;
3913 nwords = 1;
3915 else
3917 imode = word_mode;
3919 if (FLOAT_WORDS_BIG_ENDIAN)
3920 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3921 else
3922 word = bitpos / BITS_PER_WORD;
3923 bitpos = bitpos % BITS_PER_WORD;
3924 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3927 wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3929 if (target == 0
3930 || target == op0
3931 || target == op1
3932 || reg_overlap_mentioned_p (target, op0)
3933 || reg_overlap_mentioned_p (target, op1)
3934 || (nwords > 1 && !valid_multiword_target_p (target)))
3935 target = gen_reg_rtx (mode);
3937 if (nwords > 1)
3939 start_sequence ();
3941 for (i = 0; i < nwords; ++i)
3943 rtx targ_piece = operand_subword (target, i, 1, mode);
3944 rtx op0_piece = operand_subword_force (op0, i, mode);
3946 if (i == word)
3948 if (!op0_is_abs)
3949 op0_piece
3950 = expand_binop (imode, and_optab, op0_piece,
3951 immed_wide_int_const (~mask, imode),
3952 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3953 op1 = expand_binop (imode, and_optab,
3954 operand_subword_force (op1, i, mode),
3955 immed_wide_int_const (mask, imode),
3956 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3958 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3959 targ_piece, 1, OPTAB_LIB_WIDEN);
3960 if (temp != targ_piece)
3961 emit_move_insn (targ_piece, temp);
3963 else
3964 emit_move_insn (targ_piece, op0_piece);
3967 insns = get_insns ();
3968 end_sequence ();
3970 emit_insn (insns);
3972 else
3974 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3975 immed_wide_int_const (mask, imode),
3976 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3978 op0 = gen_lowpart (imode, op0);
3979 if (!op0_is_abs)
3980 op0 = expand_binop (imode, and_optab, op0,
3981 immed_wide_int_const (~mask, imode),
3982 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3984 temp = expand_binop (imode, ior_optab, op0, op1,
3985 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3986 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3989 return target;
3992 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3993 scalar floating point mode. Return NULL if we do not know how to
3994 expand the operation inline. */
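/* Order of attempts: the copysign optab directly, then the abs/neg
   based expansion of expand_copysign_absneg when the sign bit is at
   least readable (signbit_ro), and finally the integer-bitmask
   expansion of expand_copysign_bit when it is writable
   (signbit_rw).  */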
3997 expand_copysign (rtx op0, rtx op1, rtx target)
3999 scalar_float_mode mode;
4000 const struct real_format *fmt;
4001 bool op0_is_abs;
4002 rtx temp;
4004 mode = as_a <scalar_float_mode> (GET_MODE (op0));
4005 gcc_assert (GET_MODE (op1) == mode);
4007 /* First try to do it with a special instruction. */
4008 temp = expand_binop (mode, copysign_optab, op0, op1,
4009 target, 0, OPTAB_DIRECT);
4010 if (temp)
4011 return temp;
4013 fmt = REAL_MODE_FORMAT (mode);
4014 if (fmt == NULL || !fmt->has_signed_zero)
4015 return NULL_RTX;
4017 op0_is_abs = false;
4018 if (CONST_DOUBLE_AS_FLOAT_P (op0))
4020 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
4021 op0 = simplify_unary_operation (ABS, mode, op0, mode);
4022 op0_is_abs = true;
4025 if (fmt->signbit_ro >= 0
4026 && (CONST_DOUBLE_AS_FLOAT_P (op0)
4027 || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
4028 && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
4030 temp = expand_copysign_absneg (mode, op0, op1, target,
4031 fmt->signbit_ro, op0_is_abs);
4032 if (temp)
4033 return temp;
4036 if (fmt->signbit_rw < 0)
4037 return NULL_RTX;
4038 return expand_copysign_bit (mode, op0, op1, target,
4039 fmt->signbit_rw, op0_is_abs);
4042 /* Generate an instruction whose insn-code is INSN_CODE,
4043 with two operands: an output TARGET and an input OP0.
4044 TARGET *must* be nonzero, and the output is always stored there.
4045 CODE is an rtx code such that (CODE OP0) is an rtx that describes
4046 the value that is stored into TARGET.
4048 Return false if expansion failed. */
4050 bool
4051 maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
4052 enum rtx_code code)
4054 class expand_operand ops[2];
4055 rtx_insn *pat;
4057 create_output_operand (&ops[0], target, GET_MODE (target));
4058 create_input_operand (&ops[1], op0, GET_MODE (op0));
4059 pat = maybe_gen_insn (icode, 2, ops);
4060 if (!pat)
4061 return false;
4063 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
4064 && code != UNKNOWN)
4065 add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX,
4066 GET_MODE (op0));
4068 emit_insn (pat);
4070 if (ops[0].value != target)
4071 emit_move_insn (target, ops[0].value);
4072 return true;
4074 /* Generate an instruction whose insn-code is INSN_CODE,
4075 with two operands: an output TARGET and an input OP0.
4076 TARGET *must* be nonzero, and the output is always stored there.
4077 CODE is an rtx code such that (CODE OP0) is an rtx that describes
4078 the value that is stored into TARGET. */
4080 void
4081 emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
4083 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
4084 gcc_assert (ok);
4087 struct no_conflict_data
4089 rtx target;
4090 rtx_insn *first, *insn;
4091 bool must_stay;
4094 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
4095 the currently examined clobber / store has to stay in the list of
4096 insns that constitute the actual libcall block. */
4097 static void
4098 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
4100 struct no_conflict_data *p = (struct no_conflict_data *) p0;
4102 /* If this insn directly contributes to setting the target, it must stay. */
4103 if (reg_overlap_mentioned_p (p->target, dest))
4104 p->must_stay = true;
4105 /* If we haven't committed to keeping any other insns in the list yet,
4106 there is nothing more to check. */
4107 else if (p->insn == p->first)
4108 return;
4109 /* If this insn sets / clobbers a register that feeds one of the insns
4110 already in the list, this insn has to stay too. */
4111 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
4112 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
4113 || reg_used_between_p (dest, p->first, p->insn)
4114 /* Likewise if this insn depends on a register set by a previous
4115 insn in the list, or if it sets a result (presumably a hard
4116 register) that is set or clobbered by a previous insn.
4117 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
4118 SET_DEST perform the former check on the address, and the latter
4119 check on the MEM. */
4120 || (GET_CODE (set) == SET
4121 && (modified_in_p (SET_SRC (set), p->first)
4122 || modified_in_p (SET_DEST (set), p->first)
4123 || modified_between_p (SET_SRC (set), p->first, p->insn)
4124 || modified_between_p (SET_DEST (set), p->first, p->insn))))
4125 p->must_stay = true;
4129 /* Emit code to make a call to a constant function or a library call.
4131 INSNS is a list containing all insns emitted in the call.
4132 These insns leave the result in RESULT. Our job is to copy RESULT
4133 to TARGET, which is logically equivalent to EQUIV.
4135 We first emit any insns that set a pseudo on the assumption that these are
4136 loading constants into registers; doing so allows them to be safely cse'ed
4137 between blocks. Then we emit all the other insns in the block, followed by
4138 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
4139 note with an operand of EQUIV. */
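/* Illustrative example: for a division expanded as a __divsi3
   libcall, EQUIV would be (div:SI x y); the REG_EQUAL note attached
   to the final copy lets later passes fold or CSE the entire call
   as if it were a plain division.  */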
4141 static void
4142 emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
4143 bool equiv_may_trap)
4145 rtx final_dest = target;
4146 rtx_insn *next, *last, *insn;
4148 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
4149 into a MEM later. Protect the libcall block from this change. */
4150 if (! REG_P (target) || REG_USERVAR_P (target))
4151 target = gen_reg_rtx (GET_MODE (target));
4153 /* If we're using non-call exceptions, a libcall corresponding to an
4154 operation that may trap may also trap. */
4155 /* ??? See the comment in front of make_reg_eh_region_note. */
4156 if (cfun->can_throw_non_call_exceptions
4157 && (equiv_may_trap || may_trap_p (equiv)))
4159 for (insn = insns; insn; insn = NEXT_INSN (insn))
4160 if (CALL_P (insn))
4162 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
4163 if (note)
4165 int lp_nr = INTVAL (XEXP (note, 0));
4166 if (lp_nr == 0 || lp_nr == INT_MIN)
4167 remove_note (insn, note);
4171 else
4173 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
4174 reg note to indicate that this call cannot throw or execute a nonlocal
4175 goto (unless there is already a REG_EH_REGION note, in which case
4176 we update it). */
4177 for (insn = insns; insn; insn = NEXT_INSN (insn))
4178 if (CALL_P (insn))
4179 make_reg_eh_region_note_nothrow_nononlocal (insn);
4182 /* First emit all insns that set pseudos. Remove them from the list as
4183 we go. Avoid insns that set pseudos which were referenced in previous
4184 insns. These can be generated by move_by_pieces, for example,
4185 to update an address. Similarly, avoid insns that reference things
4186 set in previous insns. */
4188 for (insn = insns; insn; insn = next)
4190 rtx set = single_set (insn);
4192 next = NEXT_INSN (insn);
4194 if (set != 0 && REG_P (SET_DEST (set))
4195 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4197 struct no_conflict_data data;
4199 data.target = const0_rtx;
4200 data.first = insns;
4201 data.insn = insn;
4202 data.must_stay = 0;
4203 note_stores (insn, no_conflict_move_test, &data);
4204 if (! data.must_stay)
4206 if (PREV_INSN (insn))
4207 SET_NEXT_INSN (PREV_INSN (insn)) = next;
4208 else
4209 insns = next;
4211 if (next)
4212 SET_PREV_INSN (next) = PREV_INSN (insn);
4214 add_insn (insn);
4218 /* Some ports use a loop to copy large arguments onto the stack.
4219 Don't move anything outside such a loop. */
4220 if (LABEL_P (insn))
4221 break;
4224 /* Write the remaining insns followed by the final copy. */
4225 for (insn = insns; insn; insn = next)
4227 next = NEXT_INSN (insn);
4229 add_insn (insn);
4232 last = emit_move_insn (target, result);
4233 if (equiv)
4234 set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
4236 if (final_dest != target)
4237 emit_move_insn (final_dest, target);
4240 void
4241 emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
4243 emit_libcall_block_1 (insns, target, result, equiv, false);
4246 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
4247 PURPOSE describes how this comparison will be used. CODE is the rtx
4248 comparison code we will be using.
4250 ??? Actually, CODE is slightly weaker than that. A target is still
4251 required to implement all of the normal bcc operations, but not
4252 required to implement all (or any) of the unordered bcc operations. */
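/* Illustrative example: can_compare_p (GT, HImode, ccp_jump) asks
   whether a conditional branch on (gt:HI x y) can be emitted,
   checking HImode first and then each wider mode in its class.  */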
4255 can_compare_p (enum rtx_code code, machine_mode mode,
4256 enum can_compare_purpose purpose)
4258 rtx test;
4259 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
4262 enum insn_code icode;
4264 if (purpose == ccp_jump
4265 && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
4266 && insn_operand_matches (icode, 0, test))
4267 return 1;
4268 if (purpose == ccp_store_flag
4269 && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
4270 && insn_operand_matches (icode, 1, test))
4271 return 1;
4272 if (purpose == ccp_cmov
4273 && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
4274 return 1;
4276 mode = GET_MODE_WIDER_MODE (mode).else_void ();
4277 PUT_MODE (test, mode);
4279 while (mode != VOIDmode);
4281 return 0;
4284 /* Return whether RTL code CODE corresponds to an unsigned optab. */
4286 static bool
4287 unsigned_optab_p (enum rtx_code code)
4289 return code == LTU || code == LEU || code == GTU || code == GEU;
4292 /* Return whether the backend-emitted comparison for code CODE, comparing
4293 operands of mode VALUE_MODE and producing a result with MASK_MODE, matches
4294 operand OPNO of pattern ICODE. */
4296 static bool
4297 insn_predicate_matches_p (enum insn_code icode, unsigned int opno,
4298 enum rtx_code code, machine_mode mask_mode,
4299 machine_mode value_mode)
4301 rtx reg1 = alloca_raw_REG (value_mode, LAST_VIRTUAL_REGISTER + 1);
4302 rtx reg2 = alloca_raw_REG (value_mode, LAST_VIRTUAL_REGISTER + 2);
4303 rtx test = alloca_rtx_fmt_ee (code, mask_mode, reg1, reg2);
4304 return insn_operand_matches (icode, opno, test);
4307 /* Return whether the backend can emit a vector comparison (vec_cmp/vec_cmpu)
4308 for code CODE, comparing operands of mode VALUE_MODE and producing a result
4309 with MASK_MODE. */
4311 bool
4312 can_vec_cmp_compare_p (enum rtx_code code, machine_mode value_mode,
4313 machine_mode mask_mode)
4315 enum insn_code icode
4316 = get_vec_cmp_icode (value_mode, mask_mode, unsigned_optab_p (code));
4317 if (icode == CODE_FOR_nothing)
4318 return false;
4320 return insn_predicate_matches_p (icode, 1, code, mask_mode, value_mode);
4323 /* Return whether the backend can emit a vector comparison (vcond/vcondu) for
4324 code CODE, comparing operands of mode CMP_OP_MODE and producing a result
4325 with VALUE_MODE. */
4327 bool
4328 can_vcond_compare_p (enum rtx_code code, machine_mode value_mode,
4329 machine_mode cmp_op_mode)
4331 enum insn_code icode
4332 = get_vcond_icode (value_mode, cmp_op_mode, unsigned_optab_p (code));
4333 if (icode == CODE_FOR_nothing)
4334 return false;
4336 return insn_predicate_matches_p (icode, 3, code, value_mode, cmp_op_mode);
4339 /* Return whether the backend can emit vector set instructions for inserting
4340 element into vector at variable index position. */
4342 bool
4343 can_vec_set_var_idx_p (machine_mode vec_mode)
4345 if (!VECTOR_MODE_P (vec_mode))
4346 return false;
4348 machine_mode inner_mode = GET_MODE_INNER (vec_mode);
4350 rtx reg1 = alloca_raw_REG (vec_mode, LAST_VIRTUAL_REGISTER + 1);
4351 rtx reg2 = alloca_raw_REG (inner_mode, LAST_VIRTUAL_REGISTER + 2);
4353 enum insn_code icode = optab_handler (vec_set_optab, vec_mode);
4355 const struct insn_data_d *data = &insn_data[icode];
4356 machine_mode idx_mode = data->operand[2].mode;
4358 rtx reg3 = alloca_raw_REG (idx_mode, LAST_VIRTUAL_REGISTER + 3);
4360 return icode != CODE_FOR_nothing && insn_operand_matches (icode, 0, reg1)
4361 && insn_operand_matches (icode, 1, reg2)
4362 && insn_operand_matches (icode, 2, reg3);
4365 /* This function is called when we are going to emit a compare instruction that
4366 compares the values found in X and Y, using the rtl operator COMPARISON.
4368 If they have mode BLKmode, then SIZE specifies the size of both operands.
4370 UNSIGNEDP nonzero says that the operands are unsigned;
4371 this matters if they need to be widened (as given by METHODS).
4373 *PTEST is where the resulting comparison RTX is returned or NULL_RTX
4374 if we failed to produce one.
4376 *PMODE is the mode of the inputs (in case they are const_int).
4378 This function performs all the setup necessary so that the caller only has
4379 to emit a single comparison insn. This setup can involve doing a BLKmode
4380 comparison or emitting a library call to perform the comparison if no insn
4381 is available to handle it.
4382 The values which are passed in through pointers can be modified; the caller
4383 should perform the comparison on the modified values. Constant
4384 comparisons must have already been folded. */
4386 static void
4387 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4388 int unsignedp, enum optab_methods methods,
4389 rtx *ptest, machine_mode *pmode)
4391 machine_mode mode = *pmode;
4392 rtx libfunc, test;
4393 machine_mode cmp_mode;
4395 /* The other methods are not needed. */
4396 gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
4397 || methods == OPTAB_LIB_WIDEN);
4399 if (CONST_SCALAR_INT_P (y))
4400 canonicalize_comparison (mode, &comparison, &y);
4402 /* If we are optimizing, force expensive constants into a register. */
4403 if (CONSTANT_P (x) && optimize
4404 && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
4405 > COSTS_N_INSNS (1))
4406 && can_create_pseudo_p ())
4407 x = force_reg (mode, x);
4409 if (CONSTANT_P (y) && optimize
4410 && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
4411 > COSTS_N_INSNS (1))
4412 && can_create_pseudo_p ())
4413 y = force_reg (mode, y);
4415 /* Don't let both operands fail to indicate the mode. */
4416 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
4417 x = force_reg (mode, x);
4418 if (mode == VOIDmode)
4419 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
4421 /* Handle all BLKmode compares. */
4423 if (mode == BLKmode)
4425 machine_mode result_mode;
4426 enum insn_code cmp_code;
4427 rtx result;
4428 rtx opalign
4429 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
4431 gcc_assert (size);
4433 /* Try to use a memory block compare insn - either cmpstr
4434 or cmpmem will do. */
4435 opt_scalar_int_mode cmp_mode_iter;
4436 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
4438 scalar_int_mode cmp_mode = cmp_mode_iter.require ();
4439 cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
4440 if (cmp_code == CODE_FOR_nothing)
4441 cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
4442 if (cmp_code == CODE_FOR_nothing)
4443 cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
4444 if (cmp_code == CODE_FOR_nothing)
4445 continue;
4447 /* Must make sure the size fits the insn's mode. */
4448 if (CONST_INT_P (size)
4449 ? UINTVAL (size) > GET_MODE_MASK (cmp_mode)
4450 : (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (size)))
4451 > GET_MODE_BITSIZE (cmp_mode)))
4452 continue;
4454 result_mode = insn_data[cmp_code].operand[0].mode;
4455 result = gen_reg_rtx (result_mode);
4456 size = convert_to_mode (cmp_mode, size, 1);
4457 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
4459 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
4460 *pmode = result_mode;
4461 return;
4464 if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
4465 goto fail;
4467 /* Otherwise call a library function. */
4468 result = emit_block_comp_via_libcall (x, y, size);
4470 x = result;
4471 y = const0_rtx;
4472 mode = TYPE_MODE (integer_type_node);
4473 methods = OPTAB_LIB_WIDEN;
4474 unsignedp = false;
4477 /* Don't allow operands to the compare to trap, as that can put the
4478 compare and branch in different basic blocks. */
4479 if (cfun->can_throw_non_call_exceptions)
4481 if (!can_create_pseudo_p () && (may_trap_p (x) || may_trap_p (y)))
4482 goto fail;
4483 if (may_trap_p (x))
4484 x = copy_to_reg (x);
4485 if (may_trap_p (y))
4486 y = copy_to_reg (y);
4489 if (GET_MODE_CLASS (mode) == MODE_CC)
4491 enum insn_code icode = optab_handler (cbranch_optab, CCmode);
4492 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
4493 if (icode != CODE_FOR_nothing
4494 && insn_operand_matches (icode, 0, test))
4496 *ptest = test;
4497 return;
4499 else
4500 goto fail;
4503 test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
4504 FOR_EACH_WIDER_MODE_FROM (cmp_mode, mode)
4506 enum insn_code icode;
4507 icode = optab_handler (cbranch_optab, cmp_mode);
4508 if (icode != CODE_FOR_nothing
4509 && insn_operand_matches (icode, 0, test))
4511 rtx_insn *last = get_last_insn ();
4512 rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
4513 rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
4514 if (op0 && op1
4515 && insn_operand_matches (icode, 1, op0)
4516 && insn_operand_matches (icode, 2, op1))
4518 XEXP (test, 0) = op0;
4519 XEXP (test, 1) = op1;
4520 *ptest = test;
4521 *pmode = cmp_mode;
4522 return;
4524 delete_insns_since (last);
4527 if (methods == OPTAB_DIRECT)
4528 break;
4531 if (methods != OPTAB_LIB_WIDEN)
4532 goto fail;
4534 if (SCALAR_FLOAT_MODE_P (mode))
4536 /* Small trick if UNORDERED isn't implemented by the hardware. */
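/* Why this works: x < x is false for every ordered x, so
   UNLT (x, x) is true exactly when x is a NaN, which is precisely
   UNORDERED (x, x).  */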
4537 if (comparison == UNORDERED && rtx_equal_p (x, y))
4539 prepare_cmp_insn (x, y, UNLT, NULL_RTX, unsignedp, OPTAB_WIDEN,
4540 ptest, pmode);
4541 if (*ptest)
4542 return;
4545 prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
4547 else
4549 rtx result;
4550 machine_mode ret_mode;
4552 /* Handle a libcall just for the mode we are using. */
4553 libfunc = optab_libfunc (cmp_optab, mode);
4554 gcc_assert (libfunc);
4556 /* If we want unsigned, and this mode has a distinct unsigned
4557 comparison routine, use that. */
4558 if (unsignedp)
4560 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
4561 if (ulibfunc)
4562 libfunc = ulibfunc;
4565 ret_mode = targetm.libgcc_cmp_return_mode ();
4566 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4567 ret_mode, x, mode, y, mode);
4569 /* There are two kinds of comparison routines. Biased routines
4570 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4571 of gcc expect that the comparison operation is equivalent
4572 to the modified comparison. For signed comparisons compare the
4573 result against 1 in the biased case, and zero in the unbiased
4574 case. For unsigned comparisons always compare against 1 after
4575 biasing the unbiased result by adding 1. This gives us a way to
4576 represent LTU.
4577 The comparisons in the fixed-point helper library are always
4578 biased. */
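/* Worked example (illustrative): for `a < b', a biased routine
   returns 0/1/2 for less/equal/greater, so the test becomes
   result LT 1.  An unbiased routine returns -1/0/1, giving the
   signed test result LT 0; for LTU we bias the result ourselves
   and test result + 1 LTU 1, which is true exactly when the
   routine returned -1.  */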
4579 x = result;
4580 y = const1_rtx;
4582 if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
4584 if (unsignedp)
4585 x = plus_constant (ret_mode, result, 1);
4586 else
4587 y = const0_rtx;
4590 *pmode = ret_mode;
4591 prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
4592 ptest, pmode);
4595 return;
4597 fail:
4598 *ptest = NULL_RTX;
4601 /* Before emitting an insn with code ICODE, make sure that X, which is going
4602 to be used for operand OPNUM of the insn, is converted from mode MODE to
4603 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4604 that it is accepted by the operand predicate. Return the new value. */
4607 prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
4608 machine_mode wider_mode, int unsignedp)
4610 if (mode != wider_mode)
4611 x = convert_modes (wider_mode, mode, x, unsignedp);
4613 if (!insn_operand_matches (icode, opnum, x))
4615 machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
4616 if (reload_completed)
4617 return NULL_RTX;
4618 if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
4619 return NULL_RTX;
4620 x = copy_to_mode_reg (op_mode, x);
4623 return x;
4626 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4627 we can do the branch. */
4629 static void
4630 emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
4631 direct_optab cmp_optab, profile_probability prob,
4632 bool test_branch)
4634 machine_mode optab_mode;
4635 enum mode_class mclass;
4636 enum insn_code icode;
4637 rtx_insn *insn;
4639 mclass = GET_MODE_CLASS (mode);
4640 optab_mode = (mclass == MODE_CC) ? CCmode : mode;
4641 icode = optab_handler (cmp_optab, optab_mode);
4643 gcc_assert (icode != CODE_FOR_nothing);
4644 gcc_assert (test_branch || insn_operand_matches (icode, 0, test));
4645 if (test_branch)
4646 insn = emit_jump_insn (GEN_FCN (icode) (XEXP (test, 0),
4647 XEXP (test, 1), label));
4648 else
4649 insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
4650 XEXP (test, 1), label));
4652 if (prob.initialized_p ()
4653 && profile_status_for_fn (cfun) != PROFILE_ABSENT
4654 && insn
4655 && JUMP_P (insn)
4656 && any_condjump_p (insn)
4657 && !find_reg_note (insn, REG_BR_PROB, 0))
4658 add_reg_br_prob_note (insn, prob);
4661 /* PTEST points to a comparison that compares its first operand with zero.
4662 Check to see if it can be performed as a bit-test-and-branch instead.
4663 On success, return the instruction that performs the bit-test-and-branch
4664 and replace the second operand of *PTEST with the bit number to test.
4665 On failure, return CODE_FOR_nothing and leave *PTEST unchanged.
4667 Note that the comparison described by *PTEST should not be taken
4668 literally after a successful return. *PTEST is just a convenient
4669 place to store the two operands of the bit-and-test.
4671 VAL must contain the original tree expression for the first operand
4672 of *PTEST. */
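/* Illustrative example: if get_nonzero_bits shows that VAL can
   only have bit 3 set, then `val != 0' is equivalent to testing
   bit 3 alone, and the second operand of *PTEST is rewritten to
   that bit number for the tbranch pattern.  */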
4674 static enum insn_code
4675 validate_test_and_branch (tree val, rtx *ptest, machine_mode *pmode, optab *res)
4677 if (!val || TREE_CODE (val) != SSA_NAME)
4678 return CODE_FOR_nothing;
4680 machine_mode mode = TYPE_MODE (TREE_TYPE (val));
4681 rtx test = *ptest;
4682 direct_optab optab;
4684 if (GET_CODE (test) == EQ)
4685 optab = tbranch_eq_optab;
4686 else if (GET_CODE (test) == NE)
4687 optab = tbranch_ne_optab;
4688 else
4689 return CODE_FOR_nothing;
4691 *res = optab;
4693 /* If the target supports the testbit comparison directly, great. */
4694 auto icode = direct_optab_handler (optab, mode);
4695 if (icode == CODE_FOR_nothing)
4696 return icode;
4698 if (tree_zero_one_valued_p (val))
4700 auto pos = BITS_BIG_ENDIAN ? GET_MODE_BITSIZE (mode) - 1 : 0;
4701 XEXP (test, 1) = gen_int_mode (pos, mode);
4702 *ptest = test;
4703 *pmode = mode;
4704 return icode;
4707 wide_int wcst = get_nonzero_bits (val);
4708 if (wcst == -1)
4709 return CODE_FOR_nothing;
4711 int bitpos;
4713 if ((bitpos = wi::exact_log2 (wcst)) == -1)
4714 return CODE_FOR_nothing;
4716 auto pos = BITS_BIG_ENDIAN ? GET_MODE_BITSIZE (mode) - 1 - bitpos : bitpos;
4717 XEXP (test, 1) = gen_int_mode (pos, mode);
4718 *ptest = test;
4719 *pmode = mode;
4720 return icode;
4723 /* Generate code to compare X with Y so that the condition codes are
4724 set and to jump to LABEL if the condition is true. If X is a
4725 constant and Y is not a constant, then the comparison is swapped to
4726 ensure that the comparison RTL has the canonical form.
4728 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4729 need to be widened. UNSIGNEDP is also used to select the proper
4730 branch condition code.
4732 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4734 MODE is the mode of the inputs (in case they are const_int).
4736 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4737 It will be potentially converted into an unsigned variant based on
4738 UNSIGNEDP to select a proper jump instruction.
4740 PROB is the probability of jumping to LABEL. If the comparison is against
4741 zero then VAL contains the expression from which the non-zero RTL is
4742 derived. */
4744 void
4745 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4746 machine_mode mode, int unsignedp, tree val, rtx label,
4747 profile_probability prob)
4749 rtx op0 = x, op1 = y;
4750 rtx test;
4752 /* Swap operands and condition to ensure canonical RTL. */
4753 if (swap_commutative_operands_p (x, y)
4754 && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4756 op0 = y, op1 = x;
4757 comparison = swap_condition (comparison);
4760 /* If OP0 is still a constant, then both X and Y must be constants
4761 or the opposite comparison is not supported. Force X into a register
4762 to create canonical RTL. */
4763 if (CONSTANT_P (op0))
4764 op0 = force_reg (mode, op0);
4766 if (unsignedp)
4767 comparison = unsigned_condition (comparison);
4769 prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4770 &test, &mode);
4772 /* Check if we're comparing a truth type with 0, and if so check if
4773 the target supports tbranch. */
4774 machine_mode tmode = mode;
4775 direct_optab optab;
4776 if (op1 == CONST0_RTX (GET_MODE (op1))
4777 && validate_test_and_branch (val, &test, &tmode,
4778 &optab) != CODE_FOR_nothing)
4780 emit_cmp_and_jump_insn_1 (test, tmode, label, optab, prob, true);
4781 return;
4784 emit_cmp_and_jump_insn_1 (test, mode, label, cbranch_optab, prob, false);
4787 /* Overloaded version of emit_cmp_and_jump_insns in which VAL is unknown. */
4789 void
4790 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4791 machine_mode mode, int unsignedp, rtx label,
4792 profile_probability prob)
4794 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, NULL,
4795 label, prob);
4799 /* Emit a library call comparison between floating point X and Y.
4800 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4802 static void
4803 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4804 rtx *ptest, machine_mode *pmode)
4806 enum rtx_code swapped = swap_condition (comparison);
4807 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4808 machine_mode orig_mode = GET_MODE (x);
4809 machine_mode mode;
4810 rtx true_rtx, false_rtx;
4811 rtx value, target, equiv;
4812 rtx_insn *insns;
4813 rtx libfunc = 0;
4814 bool reversed_p = false;
4815 scalar_int_mode cmp_mode = targetm.libgcc_cmp_return_mode ();
4817 FOR_EACH_WIDER_MODE_FROM (mode, orig_mode)
4819 if (code_to_optab (comparison)
4820 && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
4821 break;
4823 if (code_to_optab (swapped)
4824 && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
4826 std::swap (x, y);
4827 comparison = swapped;
4828 break;
4831 if (code_to_optab (reversed)
4832 && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
4834 comparison = reversed;
4835 reversed_p = true;
4836 break;
4840 gcc_assert (mode != VOIDmode);
4842 if (mode != orig_mode)
4844 x = convert_to_mode (mode, x, 0);
4845 y = convert_to_mode (mode, y, 0);
4848 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4849 the RTL. This allows the RTL optimizers to delete the libcall if the
4850 condition can be determined at compile-time. */
4851 if (comparison == UNORDERED
4852 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4854 true_rtx = const_true_rtx;
4855 false_rtx = const0_rtx;
4857 else
4859 switch (comparison)
4861 case EQ:
4862 true_rtx = const0_rtx;
4863 false_rtx = const_true_rtx;
4864 break;
4866 case NE:
4867 true_rtx = const_true_rtx;
4868 false_rtx = const0_rtx;
4869 break;
4871 case GT:
4872 true_rtx = const1_rtx;
4873 false_rtx = const0_rtx;
4874 break;
4876 case GE:
4877 true_rtx = const0_rtx;
4878 false_rtx = constm1_rtx;
4879 break;
4881 case LT:
4882 true_rtx = constm1_rtx;
4883 false_rtx = const0_rtx;
4884 break;
4886 case LE:
4887 true_rtx = const0_rtx;
4888 false_rtx = const1_rtx;
4889 break;
4891 default:
4892 gcc_unreachable ();
4896 if (comparison == UNORDERED)
4898 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4899 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4900 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4901 temp, const_true_rtx, equiv);
4903 else
4905 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4906 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4907 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4908 equiv, true_rtx, false_rtx);
4911 start_sequence ();
4912 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4913 cmp_mode, x, mode, y, mode);
4914 insns = get_insns ();
4915 end_sequence ();
4917 target = gen_reg_rtx (cmp_mode);
4918 emit_libcall_block (insns, target, value, equiv);
4920 if (comparison == UNORDERED
4921 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4922 || reversed_p)
4923 *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4924 else
4925 *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4927 *pmode = cmp_mode;
4930 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4932 void
4933 emit_indirect_jump (rtx loc)
4935 if (!targetm.have_indirect_jump ())
4936 sorry ("indirect jumps are not available on this target");
4937 else
4939 class expand_operand ops[1];
4940 create_address_operand (&ops[0], loc);
4941 expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
4942 emit_barrier ();
4947 /* Emit a conditional move instruction if the machine supports one for that
4948 condition and machine mode.
4950 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4951 the mode to use should they be constants. If it is VOIDmode, they cannot
4952 both be constants.
4954 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4955 should be stored there. MODE is the mode to use should they be constants.
4956 If it is VOIDmode, they cannot both be constants.
4958 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4959 is not supported. */
4962 emit_conditional_move (rtx target, struct rtx_comparison comp,
4963 rtx op2, rtx op3,
4964 machine_mode mode, int unsignedp)
4966 rtx comparison;
4967 rtx_insn *last;
4968 enum insn_code icode;
4969 enum rtx_code reversed;
4971 /* If the two source operands are identical, that's just a move. */
4973 if (rtx_equal_p (op2, op3))
4975 if (!target)
4976 target = gen_reg_rtx (mode);
4978 emit_move_insn (target, op3);
4979 return target;
4982 /* If one operand is constant, make it the second one. Only do this
4983 if the other operand is not constant as well. */
4985 if (swap_commutative_operands_p (comp.op0, comp.op1))
4987 std::swap (comp.op0, comp.op1);
4988 comp.code = swap_condition (comp.code);
4991 /* get_condition will prefer to generate LT and GT even if the old
4992 comparison was against zero, so undo that canonicalization here since
4993 comparisons against zero are cheaper. */
4995 if (comp.code == LT && comp.op1 == const1_rtx)
4996 comp.code = LE, comp.op1 = const0_rtx;
4997 else if (comp.code == GT && comp.op1 == constm1_rtx)
4998 comp.code = GE, comp.op1 = const0_rtx;
5000 if (comp.mode == VOIDmode)
5001 comp.mode = GET_MODE (comp.op0);
5003 enum rtx_code orig_code = comp.code;
5004 bool swapped = false;
5005 if (swap_commutative_operands_p (op2, op3)
5006 && ((reversed =
5007 reversed_comparison_code_parts (comp.code, comp.op0, comp.op1, NULL))
5008 != UNKNOWN))
5010 std::swap (op2, op3);
5011 comp.code = reversed;
5012 swapped = true;
5015 if (mode == VOIDmode)
5016 mode = GET_MODE (op2);
5018 icode = direct_optab_handler (movcc_optab, mode);
5020 if (icode == CODE_FOR_nothing)
5021 return NULL_RTX;
5023 if (!target)
5024 target = gen_reg_rtx (mode);
5026 for (int pass = 0; ; pass++)
5028 comp.code = unsignedp ? unsigned_condition (comp.code) : comp.code;
5029 comparison =
5030 simplify_gen_relational (comp.code, VOIDmode,
5031 comp.mode, comp.op0, comp.op1);
5033 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
5034 punt and let the caller figure out how best to deal with this
5035 situation. */
5036 if (COMPARISON_P (comparison))
5038 saved_pending_stack_adjust save;
5039 save_pending_stack_adjust (&save);
5040 last = get_last_insn ();
5041 do_pending_stack_adjust ();
5042 machine_mode cmpmode = comp.mode;
5043 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
5044 GET_CODE (comparison), NULL_RTX, unsignedp,
5045 OPTAB_WIDEN, &comparison, &cmpmode);
5046 if (comparison)
5048 rtx res = emit_conditional_move_1 (target, comparison,
5049 op2, op3, mode);
5050 if (res != NULL_RTX)
5051 return res;
5053 delete_insns_since (last);
5054 restore_pending_stack_adjust (&save);
5057 if (pass == 1)
5058 return NULL_RTX;
5060 /* If the preferred op2/op3 order is not usable, retry with other
5061 operand order, perhaps it will expand successfully. */
5062 if (swapped)
5063 comp.code = orig_code;
5064 else if ((reversed =
5065 reversed_comparison_code_parts (orig_code, comp.op0, comp.op1,
5066 NULL))
5067 != UNKNOWN)
5068 comp.code = reversed;
5069 else
5070 return NULL_RTX;
5071 std::swap (op2, op3);
5075 /* Helper function that, in addition to COMPARISON, also tries
5076 the reversed REV_COMPARISON with swapped OP2 and OP3. As opposed
5077 to when we pass the specific constituents of a comparison, no
5078 additional insns are emitted for it. It might still be necessary
5079 to emit more than one insn for the final conditional move, though. */
5082 emit_conditional_move (rtx target, rtx comparison, rtx rev_comparison,
5083 rtx op2, rtx op3, machine_mode mode)
5085 rtx res = emit_conditional_move_1 (target, comparison, op2, op3, mode);
5087 if (res != NULL_RTX)
5088 return res;
5090 return emit_conditional_move_1 (target, rev_comparison, op3, op2, mode);
5093 /* Helper for emitting a conditional move. */
5095 static rtx
5096 emit_conditional_move_1 (rtx target, rtx comparison,
5097 rtx op2, rtx op3, machine_mode mode)
5099 enum insn_code icode;
5101 if (comparison == NULL_RTX || !COMPARISON_P (comparison))
5102 return NULL_RTX;
5104 /* If the two source operands are identical, that's just a move.
5105 As the comparison comes in non-canonicalized, we must make
5106 sure not to discard any possible side effects. If there are
5107 side effects, just let the target handle it. */
5108 if (!side_effects_p (comparison) && rtx_equal_p (op2, op3))
5110 if (!target)
5111 target = gen_reg_rtx (mode);
5113 emit_move_insn (target, op3);
5114 return target;
5117 if (mode == VOIDmode)
5118 mode = GET_MODE (op2);
5120 icode = direct_optab_handler (movcc_optab, mode);
5122 if (icode == CODE_FOR_nothing)
5123 return NULL_RTX;
5125 if (!target)
5126 target = gen_reg_rtx (mode);
5128 class expand_operand ops[4];
5130 create_output_operand (&ops[0], target, mode);
5131 create_fixed_operand (&ops[1], comparison);
5132 create_input_operand (&ops[2], op2, mode);
5133 create_input_operand (&ops[3], op3, mode);
5135 if (maybe_expand_insn (icode, 4, ops))
5137 if (ops[0].value != target)
5138 convert_move (target, ops[0].value, false);
5139 return target;
5142 return NULL_RTX;
5146 /* Emit a conditional negate or bitwise complement using the
5147 negcc or notcc optabs if available. Return NULL_RTX if such operations
5148 are not available. Otherwise return the RTX holding the result.
5149 TARGET is the desired destination of the result. COMP is the comparison
5150 on which to negate. If COND is true move into TARGET the negation
5151 or bitwise complement of OP1. Otherwise move OP2 into TARGET.
5152 CODE is either NEG or NOT. MODE is the machine mode in which the
5153 operation is performed. */
5156 emit_conditional_neg_or_complement (rtx target, rtx_code code,
5157 machine_mode mode, rtx cond, rtx op1,
5158 rtx op2)
5160 optab op = unknown_optab;
5161 if (code == NEG)
5162 op = negcc_optab;
5163 else if (code == NOT)
5164 op = notcc_optab;
5165 else
5166 gcc_unreachable ();
5168 insn_code icode = direct_optab_handler (op, mode);
5170 if (icode == CODE_FOR_nothing)
5171 return NULL_RTX;
5173 if (!target)
5174 target = gen_reg_rtx (mode);
5176 rtx_insn *last = get_last_insn ();
5177 class expand_operand ops[4];
5179 create_output_operand (&ops[0], target, mode);
5180 create_fixed_operand (&ops[1], cond);
5181 create_input_operand (&ops[2], op1, mode);
5182 create_input_operand (&ops[3], op2, mode);
5184 if (maybe_expand_insn (icode, 4, ops))
5186 if (ops[0].value != target)
5187 convert_move (target, ops[0].value, false);
5189 return target;
5191 delete_insns_since (last);
5192 return NULL_RTX;
5195 /* Emit a conditional addition instruction if the machine supports one for that
5196 condition and machine mode.
5198 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
5199 the mode to use should they be constants. If it is VOIDmode, they cannot
5200 both be constants.
5202 OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
5203 should be stored there. MODE is the mode to use should they be constants.
5204 If it is VOIDmode, they cannot both be constants.
5206 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
5207 is not supported. */
5210 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
5211 machine_mode cmode, rtx op2, rtx op3,
5212 machine_mode mode, int unsignedp)
5214 rtx comparison;
5215 rtx_insn *last;
5216 enum insn_code icode;
5218 /* If one operand is constant, make it the second one. Only do this
5219 if the other operand is not constant as well. */
5221 if (swap_commutative_operands_p (op0, op1))
5223 std::swap (op0, op1);
5224 code = swap_condition (code);
5227 /* get_condition will prefer to generate LT and GT even if the old
5228 comparison was against zero, so undo that canonicalization here since
5229 comparisons against zero are cheaper. */
5230 if (code == LT && op1 == const1_rtx)
5231 code = LE, op1 = const0_rtx;
5232 else if (code == GT && op1 == constm1_rtx)
5233 code = GE, op1 = const0_rtx;
5235 if (cmode == VOIDmode)
5236 cmode = GET_MODE (op0);
5238 if (mode == VOIDmode)
5239 mode = GET_MODE (op2);
5241 icode = optab_handler (addcc_optab, mode);
5243 if (icode == CODE_FOR_nothing)
5244 return 0;
5246 if (!target)
5247 target = gen_reg_rtx (mode);
5249 code = unsignedp ? unsigned_condition (code) : code;
5250 comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
5252 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
5253 return NULL and let the caller figure out how best to deal with this
5254 situation. */
5255 if (!COMPARISON_P (comparison))
5256 return NULL_RTX;
5258 do_pending_stack_adjust ();
5259 last = get_last_insn ();
5260 prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
5261 GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
5262 &comparison, &cmode);
5263 if (comparison)
5265 class expand_operand ops[4];
5267 create_output_operand (&ops[0], target, mode);
5268 create_fixed_operand (&ops[1], comparison);
5269 create_input_operand (&ops[2], op2, mode);
5270 create_input_operand (&ops[3], op3, mode);
5271 if (maybe_expand_insn (icode, 4, ops))
5273 if (ops[0].value != target)
5274 convert_move (target, ops[0].value, false);
5275 return target;
5278 delete_insns_since (last);
5279 return NULL_RTX;
5282 /* These functions attempt to generate an insn body, rather than
5283 emitting the insn, but if the gen function already emits them, we
5284 make no attempt to turn them back into naked patterns. */
5286 /* Generate and return an insn body to add Y to X. */
5288 rtx_insn *
5289 gen_add2_insn (rtx x, rtx y)
5291 enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
5293 gcc_assert (insn_operand_matches (icode, 0, x));
5294 gcc_assert (insn_operand_matches (icode, 1, x));
5295 gcc_assert (insn_operand_matches (icode, 2, y));
5297 return GEN_FCN (icode) (x, x, y);
5300 /* Generate and return an insn body to add r1 and c,
5301 storing the result in r0. */
5303 rtx_insn *
5304 gen_add3_insn (rtx r0, rtx r1, rtx c)
5306 enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
5308 if (icode == CODE_FOR_nothing
5309 || !insn_operand_matches (icode, 0, r0)
5310 || !insn_operand_matches (icode, 1, r1)
5311 || !insn_operand_matches (icode, 2, c))
5312 return NULL;
5314 return GEN_FCN (icode) (r0, r1, c);
5318 have_add2_insn (rtx x, rtx y)
5320 enum insn_code icode;
5322 gcc_assert (GET_MODE (x) != VOIDmode);
5324 icode = optab_handler (add_optab, GET_MODE (x));
5326 if (icode == CODE_FOR_nothing)
5327 return 0;
5329 if (!insn_operand_matches (icode, 0, x)
5330 || !insn_operand_matches (icode, 1, x)
5331 || !insn_operand_matches (icode, 2, y))
5332 return 0;
5334 return 1;
5337 /* Generate and return an insn body to add Y and Z, storing the result in X. */
5339 rtx_insn *
5340 gen_addptr3_insn (rtx x, rtx y, rtx z)
5342 enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));
5344 gcc_assert (insn_operand_matches (icode, 0, x));
5345 gcc_assert (insn_operand_matches (icode, 1, y));
5346 gcc_assert (insn_operand_matches (icode, 2, z));
5348 return GEN_FCN (icode) (x, y, z);
5351 /* Return true if the target implements an addptr pattern and X, Y,
5352 and Z are valid for the pattern predicates. */
5355 have_addptr3_insn (rtx x, rtx y, rtx z)
5357 enum insn_code icode;
5359 gcc_assert (GET_MODE (x) != VOIDmode);
5361 icode = optab_handler (addptr3_optab, GET_MODE (x));
5363 if (icode == CODE_FOR_nothing)
5364 return 0;
5366 if (!insn_operand_matches (icode, 0, x)
5367 || !insn_operand_matches (icode, 1, y)
5368 || !insn_operand_matches (icode, 2, z))
5369 return 0;
5371 return 1;
5374 /* Generate and return an insn body to subtract Y from X. */
5376 rtx_insn *
5377 gen_sub2_insn (rtx x, rtx y)
5379 enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
5381 gcc_assert (insn_operand_matches (icode, 0, x));
5382 gcc_assert (insn_operand_matches (icode, 1, x));
5383 gcc_assert (insn_operand_matches (icode, 2, y));
5385 return GEN_FCN (icode) (x, x, y);
5388 /* Generate and return an insn body to subtract C from R1,
5389 storing the result in R0. */
5391 rtx_insn *
5392 gen_sub3_insn (rtx r0, rtx r1, rtx c)
5394 enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
5396 if (icode == CODE_FOR_nothing
5397 || !insn_operand_matches (icode, 0, r0)
5398 || !insn_operand_matches (icode, 1, r1)
5399 || !insn_operand_matches (icode, 2, c))
5400 return NULL;
5402 return GEN_FCN (icode) (r0, r1, c);
5406 have_sub2_insn (rtx x, rtx y)
5408 enum insn_code icode;
5410 gcc_assert (GET_MODE (x) != VOIDmode);
5412 icode = optab_handler (sub_optab, GET_MODE (x));
5414 if (icode == CODE_FOR_nothing)
5415 return 0;
5417 if (!insn_operand_matches (icode, 0, x)
5418 || !insn_operand_matches (icode, 1, x)
5419 || !insn_operand_matches (icode, 2, y))
5420 return 0;
5422 return 1;
5425 /* Generate the body of an insn to extend Y (with mode MFROM)
5426 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
5428 rtx_insn *
5429 gen_extend_insn (rtx x, rtx y, machine_mode mto,
5430 machine_mode mfrom, int unsignedp)
5432 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
5433 return GEN_FCN (icode) (x, y);
5436 /* Generate code to convert FROM to floating point
5437 and store in TO. FROM must be fixed point and not VOIDmode.
5438 UNSIGNEDP nonzero means regard FROM as unsigned.
5439 Normally this is done by correcting the final value
5440 if it is negative. */
5442 void
5443 expand_float (rtx to, rtx from, int unsignedp)
5445 enum insn_code icode;
5446 rtx target = to;
5447 scalar_mode from_mode, to_mode;
5448 machine_mode fmode, imode;
5449 bool can_do_signed = false;
5451 /* Crash now, because we won't be able to decide which mode to use. */
5452 gcc_assert (GET_MODE (from) != VOIDmode);
5454 /* Look for an insn to do the conversion. Do it in the specified
5455 modes if possible; otherwise convert either input, output or both to
5456 wider mode. If the integer mode is wider than the mode of FROM,
5457 we can do the conversion signed even if the input is unsigned. */
5459 FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
5460 FOR_EACH_MODE_FROM (imode, GET_MODE (from))
5462 int doing_unsigned = unsignedp;
5464 if (fmode != GET_MODE (to)
5465 && (significand_size (fmode)
5466 < GET_MODE_UNIT_PRECISION (GET_MODE (from))))
5467 continue;
5469 icode = can_float_p (fmode, imode, unsignedp);
5470 if (icode == CODE_FOR_nothing && unsignedp)
5472 enum insn_code scode = can_float_p (fmode, imode, 0);
5473 if (scode != CODE_FOR_nothing)
5474 can_do_signed = true;
5475 if (imode != GET_MODE (from))
5476 icode = scode, doing_unsigned = 0;
5479 if (icode != CODE_FOR_nothing)
5481 if (imode != GET_MODE (from))
5482 from = convert_to_mode (imode, from, unsignedp);
5484 if (fmode != GET_MODE (to))
5485 target = gen_reg_rtx (fmode);
5487 emit_unop_insn (icode, target, from,
5488 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
5490 if (target != to)
5491 convert_move (to, target, 0);
5492 return;
5496 /* Unsigned integer, and no way to convert directly. Convert as signed,
5497 then unconditionally adjust the result. */
5498 if (unsignedp
5499 && can_do_signed
5500 && is_a <scalar_mode> (GET_MODE (to), &to_mode)
5501 && is_a <scalar_mode> (GET_MODE (from), &from_mode))
5503 opt_scalar_mode fmode_iter;
5504 rtx_code_label *label = gen_label_rtx ();
5505 rtx temp;
5506 REAL_VALUE_TYPE offset;
5508 /* Look for a usable floating mode FMODE wider than the source and at
5509 least as wide as the target. Using FMODE will avoid rounding woes
5510 with unsigned values greater than the signed maximum value. */
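/* Illustrative example: converting a 64-bit unsigned value to
   SFmode through XFmode (when the target provides it) keeps all 64
   bits in the significand, so only the final truncation to SFmode
   rounds.  */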
5512 FOR_EACH_MODE_FROM (fmode_iter, to_mode)
5514 scalar_mode fmode = fmode_iter.require ();
5515 if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (fmode)
5516 && can_float_p (fmode, from_mode, 0) != CODE_FOR_nothing)
5517 break;
5520 if (!fmode_iter.exists (&fmode))
5522 /* There is no such mode. Pretend the target is wide enough. */
5523 fmode = to_mode;
5525 /* Avoid double-rounding when TO is narrower than FROM. */
5526 if ((significand_size (fmode) + 1)
5527 < GET_MODE_PRECISION (from_mode))
5529 rtx temp1;
5530 rtx_code_label *neglabel = gen_label_rtx ();
5532 /* Don't use TARGET if it isn't a register, is a hard register,
5533 or is the wrong mode. */
5534 if (!REG_P (target)
5535 || REGNO (target) < FIRST_PSEUDO_REGISTER
5536 || GET_MODE (target) != fmode)
5537 target = gen_reg_rtx (fmode);
5539 imode = from_mode;
5540 do_pending_stack_adjust ();
5542 /* Test whether the sign bit is set. */
5543 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
5544 0, neglabel);
5546 /* The sign bit is not set. Convert as signed. */
5547 expand_float (target, from, 0);
5548 emit_jump_insn (targetm.gen_jump (label));
5549 emit_barrier ();
5551 /* The sign bit is set.
5552 Convert to a usable (positive signed) value by shifting right
5553 one bit, while remembering if a nonzero bit was shifted
5554 out; i.e., compute (from & 1) | (from >> 1). */
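/* The OR-ed low bit acts as a sticky bit: it ensures the single
   conversion of the halved value rounds the same way the full
   value would have, and the doubling below then restores the
   magnitude exactly.  */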
5556 emit_label (neglabel);
5557 temp = expand_binop (imode, and_optab, from, const1_rtx,
5558 NULL_RTX, 1, OPTAB_LIB_WIDEN);
5559 temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
5560 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
5561 OPTAB_LIB_WIDEN);
5562 expand_float (target, temp, 0);
5564 /* Multiply by 2 to undo the shift above. */
5565 temp = expand_binop (fmode, add_optab, target, target,
5566 target, 0, OPTAB_LIB_WIDEN);
5567 if (temp != target)
5568 emit_move_insn (target, temp);
5570 do_pending_stack_adjust ();
5571 emit_label (label);
5572 goto done;
5576 /* If we are about to do some arithmetic to correct for an
5577 unsigned operand, do it in a pseudo-register. */
5579 if (to_mode != fmode
5580 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
5581 target = gen_reg_rtx (fmode);
5583 /* Convert as signed integer to floating. */
5584 expand_float (target, from, 0);
5586 /* If FROM is negative (and therefore TO is negative),
5587 correct its value by 2**bitwidth. */
5589 do_pending_stack_adjust ();
5590 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, from_mode,
5591 0, label);
5594 real_2expN (&offset, GET_MODE_PRECISION (from_mode), fmode);
5595 temp = expand_binop (fmode, add_optab, target,
5596 const_double_from_real_value (offset, fmode),
5597 target, 0, OPTAB_LIB_WIDEN);
5598 if (temp != target)
5599 emit_move_insn (target, temp);
5601 do_pending_stack_adjust ();
5602 emit_label (label);
5603 goto done;
5606 /* No hardware instruction available; call a library routine. */
5608 rtx libfunc;
5609 rtx_insn *insns;
5610 rtx value;
5611 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
5613 if (is_narrower_int_mode (GET_MODE (from), SImode))
5614 from = convert_to_mode (SImode, from, unsignedp);
5616 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5617 gcc_assert (libfunc);
5619 start_sequence ();
5621 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5622 GET_MODE (to), from, GET_MODE (from));
5623 insns = get_insns ();
5624 end_sequence ();
5626 emit_libcall_block (insns, target, value,
5627 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
5628 GET_MODE (to), from));
5631 done:
5633 /* Copy result to requested destination
5634 if we have been computing in a temp location. */
5636 if (target != to)
5638 if (GET_MODE (target) == GET_MODE (to))
5639 emit_move_insn (to, target);
5640 else
5641 convert_move (to, target, 0);
5645 /* Generate code to convert FROM to fixed point and store in TO. FROM
5646 must be floating point. */
5648 void
5649 expand_fix (rtx to, rtx from, int unsignedp)
5651 enum insn_code icode;
5652 rtx target = to;
5653 machine_mode fmode, imode;
5654 opt_scalar_mode fmode_iter;
5655 bool must_trunc = false;
5657 /* We first try to find a pair of modes, one real and one integer, at
5658 least as wide as FROM and TO, respectively, in which we can open-code
5659 this conversion. If the integer mode is wider than the mode of TO,
5660 we can do the conversion either signed or unsigned. */
5662 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
5663 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
5665 int doing_unsigned = unsignedp;
5667 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
5668 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5669 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5671 if (icode != CODE_FOR_nothing)
5673 rtx_insn *last = get_last_insn ();
5674 rtx from1 = from;
5675 if (fmode != GET_MODE (from))
5677 if (REAL_MODE_FORMAT (GET_MODE (from))
5678 == &arm_bfloat_half_format
5679 && REAL_MODE_FORMAT (fmode) == &ieee_single_format)
5680 /* The BF -> SF conversion can be just a shift; it doesn't
5681 need to handle sNaNs. */
5683 int save_flag_finite_math_only = flag_finite_math_only;
5684 flag_finite_math_only = true;
5685 from1 = convert_to_mode (fmode, from, 0);
5686 flag_finite_math_only = save_flag_finite_math_only;
5688 else
5689 from1 = convert_to_mode (fmode, from, 0);
5692 if (must_trunc)
5694 rtx temp = gen_reg_rtx (GET_MODE (from1));
5695 from1 = expand_unop (GET_MODE (from1), ftrunc_optab, from1,
5696 temp, 0);
5699 if (imode != GET_MODE (to))
5700 target = gen_reg_rtx (imode);
5702 if (maybe_emit_unop_insn (icode, target, from1,
5703 doing_unsigned ? UNSIGNED_FIX : FIX))
5705 if (target != to)
5706 convert_move (to, target, unsignedp);
5707 return;
5709 delete_insns_since (last);
5713 /* For an unsigned conversion, there is one more way to do it.
5714 If we have a signed conversion, we generate code that compares
5715 the real value to the largest representable positive number. If it
5716 is smaller, the conversion is done normally. Otherwise, subtract
5717 one plus the highest signed number, convert, and add it back.
5719 We only need to check all real modes, since we know we didn't find
5720 anything with a wider integer mode.
5722 This code used to extend the FP value into a mode wider than the
5723 destination. This is needed for decimal float modes, which cannot
5724 accurately represent one plus the highest signed number of the same
5725 size, but not for binary modes. Consider, for instance, conversion
5726 from SFmode into DImode.
5728 The hot path through the code is dealing with inputs smaller than 2^63
5729 and doing just the conversion, so there are no bits to lose.
5731 In the other path we know the value is positive in the range 2^63..2^64-1
5732 inclusive (for any other input, overflow happens and the result is undefined).
5733 So we know that the most significant bit set in the mantissa corresponds to
5734 2^63. The subtraction of 2^63 should not generate any rounding as it
5735 simply clears out that bit. The rest is trivial. */
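/* For example, fixing the DFmode value 2^63 + 2048 to unsigned DImode:
   the value compares GE against the limit 2^63, so 2^63 is subtracted,
   giving exactly 2048.0 (only the top bit is cleared, no rounding); the
   signed fix yields 2048, and XOR-ing in the 2^63 bit produces
   2^63 + 2048. */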
5737 scalar_int_mode to_mode;
5738 if (unsignedp
5739 && is_a <scalar_int_mode> (GET_MODE (to), &to_mode)
5740 && HWI_COMPUTABLE_MODE_P (to_mode))
5741 FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from)))
5743 scalar_mode fmode = fmode_iter.require ();
5744 if (CODE_FOR_nothing != can_fix_p (to_mode, fmode,
5745 0, &must_trunc)
5746 && (!DECIMAL_FLOAT_MODE_P (fmode)
5747 || (GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode))))
5749 int bitsize;
5750 REAL_VALUE_TYPE offset;
5751 rtx limit;
5752 rtx_code_label *lab1, *lab2;
5753 rtx_insn *insn;
5755 bitsize = GET_MODE_PRECISION (to_mode);
5756 real_2expN (&offset, bitsize - 1, fmode);
5757 limit = const_double_from_real_value (offset, fmode);
5758 lab1 = gen_label_rtx ();
5759 lab2 = gen_label_rtx ();
5761 if (fmode != GET_MODE (from))
5763 if (REAL_MODE_FORMAT (GET_MODE (from))
5764 == &arm_bfloat_half_format
5765 && REAL_MODE_FORMAT (fmode) == &ieee_single_format)
5766 /* The BF -> SF conversion can be just a shift; it doesn't
5767 need to handle sNaNs. */
5769 int save_flag_finite_math_only = flag_finite_math_only;
5770 flag_finite_math_only = true;
5771 from = convert_to_mode (fmode, from, 0);
5772 flag_finite_math_only = save_flag_finite_math_only;
5774 else
5775 from = convert_to_mode (fmode, from, 0);
5778 /* See if we need to do the subtraction. */
5779 do_pending_stack_adjust ();
5780 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX,
5781 GET_MODE (from), 0, lab1);
5783 /* If not, do the signed "fix" and branch around fixup code. */
5784 expand_fix (to, from, 0);
5785 emit_jump_insn (targetm.gen_jump (lab2));
5786 emit_barrier ();
5788 /* Otherwise, subtract 2**(N-1), convert to signed number,
5789 then add 2**(N-1). Do the addition using XOR since this
5790 will often generate better code. */
5791 emit_label (lab1);
5792 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5793 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5794 expand_fix (to, target, 0);
5795 target = expand_binop (to_mode, xor_optab, to,
5796 gen_int_mode
5797 (HOST_WIDE_INT_1 << (bitsize - 1),
5798 to_mode),
5799 to, 1, OPTAB_LIB_WIDEN);
5801 if (target != to)
5802 emit_move_insn (to, target);
5804 emit_label (lab2);
5806 if (optab_handler (mov_optab, to_mode) != CODE_FOR_nothing)
5808 /* Make a place for a REG_NOTE and add it. */
5809 insn = emit_move_insn (to, to);
5810 set_dst_reg_note (insn, REG_EQUAL,
5811 gen_rtx_fmt_e (UNSIGNED_FIX, to_mode,
5812 copy_rtx (from)),
5813 to);
5816 return;
5820 #ifdef HAVE_SFmode
5821 if (REAL_MODE_FORMAT (GET_MODE (from)) == &arm_bfloat_half_format
5822 && REAL_MODE_FORMAT (SFmode) == &ieee_single_format)
5823 /* We don't have BF -> TI library functions, so use BF -> SF -> TI
5824 instead; the BF -> SF conversion can be just a shift and doesn't
5825 need to handle sNaNs. */
5827 int save_flag_finite_math_only = flag_finite_math_only;
5828 flag_finite_math_only = true;
5829 from = convert_to_mode (SFmode, from, 0);
5830 flag_finite_math_only = save_flag_finite_math_only;
5831 expand_fix (to, from, unsignedp);
5832 return;
5834 #endif
5836 /* We can't do it with an insn, so use a library call. But first ensure
5837 that the mode of TO is at least as wide as SImode, since those are the
5838 only library calls we know about. */
5840 if (is_narrower_int_mode (GET_MODE (to), SImode))
5842 target = gen_reg_rtx (SImode);
5844 expand_fix (target, from, unsignedp);
5846 else
5848 rtx_insn *insns;
5849 rtx value;
5850 rtx libfunc;
5852 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5853 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5854 gcc_assert (libfunc);
5856 start_sequence ();
5858 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5859 GET_MODE (to), from, GET_MODE (from));
5860 insns = get_insns ();
5861 end_sequence ();
5863 emit_libcall_block (insns, target, value,
5864 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5865 GET_MODE (to), from));
5868 if (target != to)
5870 if (GET_MODE (to) == GET_MODE (target))
5871 emit_move_insn (to, target);
5872 else
5873 convert_move (to, target, 0);
5878 /* Promote integer arguments for a libcall if necessary.
5879 emit_library_call_value cannot do the promotion because it does not
5880 know if it should do a signed or unsigned promotion. This is because
5881 there are no tree types defined for libcalls. */
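/* For example, on a target whose promote_function_mode widens HImode
   arguments to SImode, an HImode ARG is extended here, signed or
   unsigned according to UINTP, before the libcall is emitted. */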
5883 static rtx
5884 prepare_libcall_arg (rtx arg, int uintp)
5886 scalar_int_mode mode;
5887 machine_mode arg_mode;
5888 if (is_a <scalar_int_mode> (GET_MODE (arg), &mode))
5890 /* If we need to promote the integer function argument we need to do
5891 it here instead of inside emit_library_call_value because in
5892 emit_library_call_value we don't know if we should do a signed or
5893 unsigned promotion. */
5895 int unsigned_p = 0;
5896 arg_mode = promote_function_mode (NULL_TREE, mode,
5897 &unsigned_p, NULL_TREE, 0);
5898 if (arg_mode != mode)
5899 return convert_to_mode (arg_mode, arg, uintp);
5901 return arg;
5904 /* Generate code to convert FROM to the mode of TO, where at least one
5905 of the two is a fixed-point mode. If UINTP is true, either TO or FROM
5906 is an unsigned integer. If SATP is true, we need to saturate the result. */
5908 void
5909 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5911 machine_mode to_mode = GET_MODE (to);
5912 machine_mode from_mode = GET_MODE (from);
5913 convert_optab tab;
5914 enum rtx_code this_code;
5915 enum insn_code code;
5916 rtx_insn *insns;
5917 rtx value;
5918 rtx libfunc;
5920 if (to_mode == from_mode)
5922 emit_move_insn (to, from);
5923 return;
5926 if (uintp)
5928 tab = satp ? satfractuns_optab : fractuns_optab;
5929 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5931 else
5933 tab = satp ? satfract_optab : fract_optab;
5934 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5936 code = convert_optab_handler (tab, to_mode, from_mode);
5937 if (code != CODE_FOR_nothing)
5939 emit_unop_insn (code, to, from, this_code);
5940 return;
5943 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5944 gcc_assert (libfunc);
5946 from = prepare_libcall_arg (from, uintp);
5947 from_mode = GET_MODE (from);
5949 start_sequence ();
5950 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5951 from, from_mode);
5952 insns = get_insns ();
5953 end_sequence ();
5955 emit_libcall_block (insns, to, value,
5956 gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
5959 /* Generate code to convert FROM to fixed point and store in TO. FROM
5960 must be floating point, TO must be signed. Use the conversion optab
5961 TAB to do the conversion. */
5963 bool
5964 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5966 enum insn_code icode;
5967 rtx target = to;
5968 machine_mode fmode, imode;
5970 /* We first try to find a pair of modes, one real and one integer, at
5971 least as wide as FROM and TO, respectively, in which we can open-code
5972 this conversion. If the integer mode is wider than the mode of TO,
5973 we can do the conversion either signed or unsigned. */
5975 FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
5976 FOR_EACH_MODE_FROM (imode, GET_MODE (to))
5978 icode = convert_optab_handler (tab, imode, fmode,
5979 insn_optimization_type ());
5980 if (icode != CODE_FOR_nothing)
5982 rtx_insn *last = get_last_insn ();
5983 if (fmode != GET_MODE (from))
5984 from = convert_to_mode (fmode, from, 0);
5986 if (imode != GET_MODE (to))
5987 target = gen_reg_rtx (imode);
5989 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5991 delete_insns_since (last);
5992 continue;
5994 if (target != to)
5995 convert_move (to, target, 0);
5996 return true;
6000 return false;
6003 /* Report whether we have an instruction to perform the operation
6004 specified by CODE on operands of mode MODE. */
6005 bool
6006 have_insn_for (enum rtx_code code, machine_mode mode)
6008 return (code_to_optab (code)
6009 && (optab_handler (code_to_optab (code), mode)
6010 != CODE_FOR_nothing));
6013 /* Print information about the current contents of the optabs on
6014 STDERR. */
6016 DEBUG_FUNCTION void
6017 debug_optab_libfuncs (void)
6019 int i, j, k;
6021 /* Dump the arithmetic optabs. */
6022 for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
6023 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6025 rtx l = optab_libfunc ((optab) i, (machine_mode) j);
6026 if (l)
6028 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6029 fprintf (stderr, "%s\t%s:\t%s\n",
6030 GET_RTX_NAME (optab_to_code ((optab) i)),
6031 GET_MODE_NAME (j),
6032 XSTR (l, 0));
6036 /* Dump the conversion optabs. */
6037 for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
6038 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6039 for (k = 0; k < NUM_MACHINE_MODES; ++k)
6041 rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
6042 (machine_mode) k);
6043 if (l)
6045 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6046 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
6047 GET_RTX_NAME (optab_to_code ((optab) i)),
6048 GET_MODE_NAME (j),
6049 GET_MODE_NAME (k),
6050 XSTR (l, 0));
6055 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6056 CODE. Return 0 on failure. */
6058 rtx_insn *
6059 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
6061 machine_mode mode = GET_MODE (op1);
6062 enum insn_code icode;
6063 rtx_insn *insn;
6064 rtx trap_rtx;
6066 if (mode == VOIDmode)
6067 return 0;
6069 icode = optab_handler (ctrap_optab, mode);
6070 if (icode == CODE_FOR_nothing)
6071 return 0;
6073 /* Some targets only accept a zero trap code. */
6074 if (!insn_operand_matches (icode, 3, tcode))
6075 return 0;
6077 do_pending_stack_adjust ();
6078 start_sequence ();
6079 prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
6080 &trap_rtx, &mode);
6081 if (!trap_rtx)
6082 insn = NULL;
6083 else
6084 insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
6085 tcode);
6087 /* If that failed, then give up. */
6088 if (insn == 0)
6090 end_sequence ();
6091 return 0;
6094 emit_insn (insn);
6095 insn = get_insns ();
6096 end_sequence ();
6097 return insn;
6100 /* Return rtx code for TCODE or UNKNOWN. Use UNSIGNEDP to select signed
6101 or unsigned operation code. */
6103 enum rtx_code
6104 get_rtx_code_1 (enum tree_code tcode, bool unsignedp)
6106 enum rtx_code code;
6107 switch (tcode)
6109 case EQ_EXPR:
6110 code = EQ;
6111 break;
6112 case NE_EXPR:
6113 code = NE;
6114 break;
6115 case LT_EXPR:
6116 code = unsignedp ? LTU : LT;
6117 break;
6118 case LE_EXPR:
6119 code = unsignedp ? LEU : LE;
6120 break;
6121 case GT_EXPR:
6122 code = unsignedp ? GTU : GT;
6123 break;
6124 case GE_EXPR:
6125 code = unsignedp ? GEU : GE;
6126 break;
6128 case UNORDERED_EXPR:
6129 code = UNORDERED;
6130 break;
6131 case ORDERED_EXPR:
6132 code = ORDERED;
6133 break;
6134 case UNLT_EXPR:
6135 code = UNLT;
6136 break;
6137 case UNLE_EXPR:
6138 code = UNLE;
6139 break;
6140 case UNGT_EXPR:
6141 code = UNGT;
6142 break;
6143 case UNGE_EXPR:
6144 code = UNGE;
6145 break;
6146 case UNEQ_EXPR:
6147 code = UNEQ;
6148 break;
6149 case LTGT_EXPR:
6150 code = LTGT;
6151 break;
6153 case BIT_AND_EXPR:
6154 code = AND;
6155 break;
6157 case BIT_IOR_EXPR:
6158 code = IOR;
6159 break;
6161 default:
6162 code = UNKNOWN;
6163 break;
6165 return code;
6168 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6169 or unsigned operation code. */
6171 enum rtx_code
6172 get_rtx_code (enum tree_code tcode, bool unsignedp)
6174 enum rtx_code code = get_rtx_code_1 (tcode, unsignedp);
6175 gcc_assert (code != UNKNOWN);
6176 return code;
6179 /* Return a comparison rtx of mode CMP_MODE for COND. Use UNSIGNEDP to
6180 select signed or unsigned operators. OPNO holds the index of the
6181 first comparison operand for insn ICODE. Do not generate the
6182 compare instruction itself. */
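/* For example, an unsigned LT_EXPR becomes (ltu:CMP_MODE op0 op1), with
   both operands legitimized for operands OPNO and OPNO + 1 of ICODE. */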
6184 static rtx
6185 vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
6186 tree t_op0, tree t_op1, bool unsignedp,
6187 enum insn_code icode, unsigned int opno)
6189 class expand_operand ops[2];
6190 rtx rtx_op0, rtx_op1;
6191 machine_mode m0, m1;
6192 enum rtx_code rcode = get_rtx_code (tcode, unsignedp);
6194 gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);
6196 /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t
6197 has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
6198 cases, use the original mode. */
6199 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6200 EXPAND_STACK_PARM);
6201 m0 = GET_MODE (rtx_op0);
6202 if (m0 == VOIDmode)
6203 m0 = TYPE_MODE (TREE_TYPE (t_op0));
6205 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
6206 EXPAND_STACK_PARM);
6207 m1 = GET_MODE (rtx_op1);
6208 if (m1 == VOIDmode)
6209 m1 = TYPE_MODE (TREE_TYPE (t_op1));
6211 create_input_operand (&ops[0], rtx_op0, m0);
6212 create_input_operand (&ops[1], rtx_op1, m1);
6213 if (!maybe_legitimize_operands (icode, opno, 2, ops))
6214 gcc_unreachable ();
6215 return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
6218 /* Check if vec_perm mask SEL is a constant equivalent to a shift of
6219 the first vec_perm operand, assuming the second operand (for a left
6220 shift, the first operand) is a constant vector of zeros. Return the shift distance
6221 in bits if so, or NULL_RTX if the vec_perm is not a shift. MODE is the
6222 mode of the value being shifted. SHIFT_OPTAB is vec_shr_optab for right
6223 shift or vec_shl_optab for left shift. */
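/* For example, for V4SImode: with the second operand zero, the selector
   {1, 2, 3, 4} yields {v0[1], v0[2], v0[3], 0}, a vec_shr by 32 bits;
   with the first operand zero, {0, 1, 4, 5} yields {0, 0, v1[0], v1[1]},
   a vec_shl by 64 bits. */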
6224 static rtx
6225 shift_amt_for_vec_perm_mask (machine_mode mode, const vec_perm_indices &sel,
6226 optab shift_optab)
6228 unsigned int bitsize = GET_MODE_UNIT_BITSIZE (mode);
6229 poly_int64 first = sel[0];
6230 if (maybe_ge (sel[0], GET_MODE_NUNITS (mode)))
6231 return NULL_RTX;
6233 if (shift_optab == vec_shl_optab)
6235 unsigned int nelt;
6236 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
6237 return NULL_RTX;
6238 unsigned firstidx = 0;
6239 for (unsigned int i = 0; i < nelt; i++)
6241 if (known_eq (sel[i], nelt))
6243 if (i == 0 || firstidx)
6244 return NULL_RTX;
6245 firstidx = i;
6247 else if (firstidx
6248 ? maybe_ne (sel[i], nelt + i - firstidx)
6249 : maybe_ge (sel[i], nelt))
6250 return NULL_RTX;
6253 if (firstidx == 0)
6254 return NULL_RTX;
6255 first = firstidx;
6257 else if (!sel.series_p (0, 1, first, 1))
6259 unsigned int nelt;
6260 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
6261 return NULL_RTX;
6262 for (unsigned int i = 1; i < nelt; i++)
6264 poly_int64 expected = i + first;
6265 /* Indices into the second vector are all equivalent. */
6266 if (maybe_lt (sel[i], nelt)
6267 ? maybe_ne (sel[i], expected)
6268 : maybe_lt (expected, nelt))
6269 return NULL_RTX;
6273 return gen_int_shift_amount (mode, first * bitsize);
6276 /* A subroutine of expand_vec_perm_var for expanding one vec_perm insn. */
6278 static rtx
6279 expand_vec_perm_1 (enum insn_code icode, rtx target,
6280 rtx v0, rtx v1, rtx sel)
6282 machine_mode tmode = GET_MODE (target);
6283 machine_mode smode = GET_MODE (sel);
6284 class expand_operand ops[4];
6286 gcc_assert (GET_MODE_CLASS (smode) == MODE_VECTOR_INT
6287 || related_int_vector_mode (tmode).require () == smode);
6288 create_output_operand (&ops[0], target, tmode);
6289 create_input_operand (&ops[3], sel, smode);
6291 /* Make an effort to preserve v0 == v1. The target expander is able to
6292 rely on this to determine if we're permuting a single input operand. */
6293 if (rtx_equal_p (v0, v1))
6295 if (!insn_operand_matches (icode, 1, v0))
6296 v0 = force_reg (tmode, v0);
6297 gcc_checking_assert (insn_operand_matches (icode, 1, v0));
6298 gcc_checking_assert (insn_operand_matches (icode, 2, v0));
6300 create_fixed_operand (&ops[1], v0);
6301 create_fixed_operand (&ops[2], v0);
6303 else
6305 create_input_operand (&ops[1], v0, tmode);
6306 create_input_operand (&ops[2], v1, tmode);
6309 if (maybe_expand_insn (icode, 4, ops))
6310 return ops[0].value;
6311 return NULL_RTX;
6314 /* Implement a permutation of vectors v0 and v1 using the permutation
6315 vector in SEL and return the result. Use TARGET to hold the result
6316 if nonnull and convenient.
6318 MODE is the mode of the vectors being permuted (V0 and V1). SEL_MODE
6319 is the TYPE_MODE associated with SEL, or BLKmode if SEL isn't known
6320 to have a particular mode. */
6322 rtx
6323 expand_vec_perm_const (machine_mode mode, rtx v0, rtx v1,
6324 const vec_perm_builder &sel, machine_mode sel_mode,
6325 rtx target)
6327 if (!target || !register_operand (target, mode))
6328 target = gen_reg_rtx (mode);
6330 /* Set QIMODE to a different vector mode with byte elements.
6331 If no such mode, or if MODE already has byte elements, use VOIDmode. */
6332 machine_mode qimode;
6333 if (!qimode_for_vec_perm (mode).exists (&qimode))
6334 qimode = VOIDmode;
6336 rtx_insn *last = get_last_insn ();
6338 bool single_arg_p = rtx_equal_p (v0, v1);
6339 /* Always specify two input vectors here and leave the target to handle
6340 cases in which the inputs are equal. Not all backends can cope with
6341 the single-input representation when testing for a double-input
6342 target instruction. */
6343 vec_perm_indices indices (sel, 2, GET_MODE_NUNITS (mode));
6345 /* See if this can be handled with a vec_shr or vec_shl. We only do this
6346 if the second (for vec_shr) or first (for vec_shl) vector is all
6347 zeroes. */
6348 insn_code shift_code = CODE_FOR_nothing;
6349 insn_code shift_code_qi = CODE_FOR_nothing;
6350 optab shift_optab = unknown_optab;
6351 rtx v2 = v0;
6352 if (v1 == CONST0_RTX (GET_MODE (v1)))
6353 shift_optab = vec_shr_optab;
6354 else if (v0 == CONST0_RTX (GET_MODE (v0)))
6356 shift_optab = vec_shl_optab;
6357 v2 = v1;
6359 if (shift_optab != unknown_optab)
6361 shift_code = optab_handler (shift_optab, mode);
6362 shift_code_qi = ((qimode != VOIDmode && qimode != mode)
6363 ? optab_handler (shift_optab, qimode)
6364 : CODE_FOR_nothing);
6366 if (shift_code != CODE_FOR_nothing || shift_code_qi != CODE_FOR_nothing)
6368 rtx shift_amt = shift_amt_for_vec_perm_mask (mode, indices, shift_optab);
6369 if (shift_amt)
6371 class expand_operand ops[3];
6372 if (shift_amt == const0_rtx)
6373 return v2;
6374 if (shift_code != CODE_FOR_nothing)
6376 create_output_operand (&ops[0], target, mode);
6377 create_input_operand (&ops[1], v2, mode);
6378 create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
6379 if (maybe_expand_insn (shift_code, 3, ops))
6380 return ops[0].value;
6382 if (shift_code_qi != CODE_FOR_nothing)
6384 rtx tmp = gen_reg_rtx (qimode);
6385 create_output_operand (&ops[0], tmp, qimode);
6386 create_input_operand (&ops[1], gen_lowpart (qimode, v2), qimode);
6387 create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
6388 if (maybe_expand_insn (shift_code_qi, 3, ops))
6389 return gen_lowpart (mode, ops[0].value);
6394 if (targetm.vectorize.vec_perm_const != NULL)
6396 if (single_arg_p)
6397 v1 = v0;
6399 gcc_checking_assert (GET_MODE (v0) == GET_MODE (v1));
6400 machine_mode op_mode = GET_MODE (v0);
6401 if (targetm.vectorize.vec_perm_const (mode, op_mode, target, v0, v1,
6402 indices))
6403 return target;
6406 /* Fall back to a constant byte-based permutation. */
6407 vec_perm_indices qimode_indices;
6408 rtx target_qi = NULL_RTX, v0_qi = NULL_RTX, v1_qi = NULL_RTX;
6409 if (qimode != VOIDmode)
6411 qimode_indices.new_expanded_vector (indices, GET_MODE_UNIT_SIZE (mode));
6412 target_qi = gen_reg_rtx (qimode);
6413 v0_qi = gen_lowpart (qimode, v0);
6414 v1_qi = gen_lowpart (qimode, v1);
6415 if (targetm.vectorize.vec_perm_const != NULL
6416 && targetm.vectorize.vec_perm_const (qimode, qimode, target_qi, v0_qi,
6417 v1_qi, qimode_indices))
6418 return gen_lowpart (mode, target_qi);
6421 v0 = force_reg (mode, v0);
6422 if (single_arg_p)
6423 v1 = v0;
6424 v1 = force_reg (mode, v1);
6426 /* Otherwise expand as a fully variable permutation. */
6428 /* The optabs are only defined for selectors with the same width
6429 as the values being permuted. */
6430 machine_mode required_sel_mode;
6431 if (!related_int_vector_mode (mode).exists (&required_sel_mode))
6433 delete_insns_since (last);
6434 return NULL_RTX;
6437 /* We know that it is semantically valid to treat SEL as having SEL_MODE.
6438 If that isn't the mode we want then we need to prove that using
6439 REQUIRED_SEL_MODE is OK. */
6440 if (sel_mode != required_sel_mode)
6442 if (!selector_fits_mode_p (required_sel_mode, indices))
6444 delete_insns_since (last);
6445 return NULL_RTX;
6447 sel_mode = required_sel_mode;
6450 insn_code icode = direct_optab_handler (vec_perm_optab, mode);
6451 if (icode != CODE_FOR_nothing)
6453 rtx sel_rtx = vec_perm_indices_to_rtx (sel_mode, indices);
6454 rtx tmp = expand_vec_perm_1 (icode, target, v0, v1, sel_rtx);
6455 if (tmp)
6456 return tmp;
6459 if (qimode != VOIDmode
6460 && selector_fits_mode_p (qimode, qimode_indices))
6462 icode = direct_optab_handler (vec_perm_optab, qimode);
6463 if (icode != CODE_FOR_nothing)
6465 rtx sel_qi = vec_perm_indices_to_rtx (qimode, qimode_indices);
6466 rtx tmp = expand_vec_perm_1 (icode, target_qi, v0_qi, v1_qi, sel_qi);
6467 if (tmp)
6468 return gen_lowpart (mode, tmp);
6472 delete_insns_since (last);
6473 return NULL_RTX;
6476 /* Implement a permutation of vectors v0 and v1 using the permutation
6477 vector in SEL and return the result. Use TARGET to hold the result
6478 if nonnull and convenient.
6480 MODE is the mode of the vectors being permuted (V0 and V1).
6481 SEL must have the integer equivalent of MODE and is known to be
6482 unsuitable for permutes with a constant permutation vector. */
6484 rtx
6485 expand_vec_perm_var (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
6487 enum insn_code icode;
6488 unsigned int i, u;
6489 rtx tmp, sel_qi;
6491 u = GET_MODE_UNIT_SIZE (mode);
6493 if (!target || GET_MODE (target) != mode)
6494 target = gen_reg_rtx (mode);
6496 icode = direct_optab_handler (vec_perm_optab, mode);
6497 if (icode != CODE_FOR_nothing)
6499 tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
6500 if (tmp)
6501 return tmp;
6504 /* As a special case to aid several targets, lower the element-based
6505 permutation to a byte-based permutation and try again. */
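/* For example, lowering a V4SImode permutation to V16QImode (U = 4):
   a selector element with value 2 is scaled to the byte offset 8, that
   low byte is broadcast across its element to give {8, 8, 8, 8}, and
   adding the byte indices {0, 1, 2, 3} produces the byte selector
   {8, 9, 10, 11} for that element. */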
6506 machine_mode qimode;
6507 if (!qimode_for_vec_perm (mode).exists (&qimode)
6508 || maybe_gt (GET_MODE_NUNITS (qimode), GET_MODE_MASK (QImode) + 1))
6509 return NULL_RTX;
6510 icode = direct_optab_handler (vec_perm_optab, qimode);
6511 if (icode == CODE_FOR_nothing)
6512 return NULL_RTX;
6514 /* Multiply each element by its byte size. */
6515 machine_mode selmode = GET_MODE (sel);
6516 if (u == 2)
6517 sel = expand_simple_binop (selmode, PLUS, sel, sel,
6518 NULL, 0, OPTAB_DIRECT);
6519 else
6520 sel = expand_simple_binop (selmode, ASHIFT, sel,
6521 gen_int_shift_amount (selmode, exact_log2 (u)),
6522 NULL, 0, OPTAB_DIRECT);
6523 gcc_assert (sel != NULL);
6525 /* Broadcast the low byte of each element into each of its bytes.
6526 The encoding has U interleaved stepped patterns, one for each
6527 byte of an element. */
6528 vec_perm_builder const_sel (GET_MODE_SIZE (mode), u, 3);
6529 unsigned int low_byte_in_u = BYTES_BIG_ENDIAN ? u - 1 : 0;
6530 for (i = 0; i < 3; ++i)
6531 for (unsigned int j = 0; j < u; ++j)
6532 const_sel.quick_push (i * u + low_byte_in_u);
6533 sel = gen_lowpart (qimode, sel);
6534 sel = expand_vec_perm_const (qimode, sel, sel, const_sel, qimode, NULL);
6535 gcc_assert (sel != NULL);
6537 /* Add the byte offset to each byte element. */
6538 /* Note that the indices here are defined in terms of memory ordering,
6539 so there should be no difference between big and little endian. */
6540 rtx_vector_builder byte_indices (qimode, u, 1);
6541 for (i = 0; i < u; ++i)
6542 byte_indices.quick_push (GEN_INT (i));
6543 tmp = byte_indices.build ();
6544 sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
6545 sel, 0, OPTAB_DIRECT);
6546 gcc_assert (sel_qi != NULL);
6548 tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
6549 tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
6550 gen_lowpart (qimode, v1), sel_qi);
6551 if (tmp)
6552 tmp = gen_lowpart (mode, tmp);
6553 return tmp;
6556 /* Generate VEC_SERIES_EXPR <OP0, OP1>, returning a value of mode VMODE.
6557 Use TARGET for the result if nonnull and convenient. */
6559 rtx
6560 expand_vec_series_expr (machine_mode vmode, rtx op0, rtx op1, rtx target)
6562 class expand_operand ops[3];
6563 enum insn_code icode;
6564 machine_mode emode = GET_MODE_INNER (vmode);
6566 icode = direct_optab_handler (vec_series_optab, vmode);
6567 gcc_assert (icode != CODE_FOR_nothing);
6569 create_output_operand (&ops[0], target, vmode);
6570 create_input_operand (&ops[1], op0, emode);
6571 create_input_operand (&ops[2], op1, emode);
6573 expand_insn (icode, 3, ops);
6574 return ops[0].value;
6577 /* Generate insns for a vector comparison into a mask. */
6579 rtx
6580 expand_vec_cmp_expr (tree type, tree exp, rtx target)
6582 class expand_operand ops[4];
6583 enum insn_code icode;
6584 rtx comparison;
6585 machine_mode mask_mode = TYPE_MODE (type);
6586 machine_mode vmode;
6587 bool unsignedp;
6588 tree op0a, op0b;
6589 enum tree_code tcode;
6591 op0a = TREE_OPERAND (exp, 0);
6592 op0b = TREE_OPERAND (exp, 1);
6593 tcode = TREE_CODE (exp);
6595 unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
6596 vmode = TYPE_MODE (TREE_TYPE (op0a));
6598 icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
6599 if (icode == CODE_FOR_nothing)
6601 if (tcode == EQ_EXPR || tcode == NE_EXPR)
6602 icode = get_vec_cmp_eq_icode (vmode, mask_mode);
6603 if (icode == CODE_FOR_nothing)
6604 return 0;
6607 comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
6608 unsignedp, icode, 2);
6609 create_output_operand (&ops[0], target, mask_mode);
6610 create_fixed_operand (&ops[1], comparison);
6611 create_fixed_operand (&ops[2], XEXP (comparison, 0));
6612 create_fixed_operand (&ops[3], XEXP (comparison, 1));
6613 expand_insn (icode, 4, ops);
6614 return ops[0].value;
6617 /* Expand a highpart multiply. */
6619 rtx
6620 expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
6621 rtx target, bool uns_p)
6623 class expand_operand eops[3];
6624 enum insn_code icode;
6625 int method, i;
6626 machine_mode wmode;
6627 rtx m1, m2;
6628 optab tab1, tab2;
6630 method = can_mult_highpart_p (mode, uns_p);
6631 switch (method)
6633 case 0:
6634 return NULL_RTX;
6635 case 1:
6636 tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
6637 return expand_binop (mode, tab1, op0, op1, target, uns_p,
6638 OPTAB_LIB_WIDEN);
6639 case 2:
6640 tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
6641 tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
6642 break;
6643 case 3:
6644 tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
6645 tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
6646 if (BYTES_BIG_ENDIAN)
6647 std::swap (tab1, tab2);
6648 break;
6649 default:
6650 gcc_unreachable ();
6653 icode = optab_handler (tab1, mode);
6654 wmode = insn_data[icode].operand[0].mode;
6655 gcc_checking_assert (known_eq (2 * GET_MODE_NUNITS (wmode),
6656 GET_MODE_NUNITS (mode)));
6657 gcc_checking_assert (known_eq (GET_MODE_SIZE (wmode), GET_MODE_SIZE (mode)));
6659 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
6660 create_input_operand (&eops[1], op0, mode);
6661 create_input_operand (&eops[2], op1, mode);
6662 expand_insn (icode, 3, eops);
6663 m1 = gen_lowpart (mode, eops[0].value);
6665 create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
6666 create_input_operand (&eops[1], op0, mode);
6667 create_input_operand (&eops[2], op1, mode);
6668 expand_insn (optab_handler (tab2, mode), 3, eops);
6669 m2 = gen_lowpart (mode, eops[0].value);
6671 vec_perm_builder sel;
6672 if (method == 2)
6674 /* The encoding has 2 interleaved stepped patterns. */
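/* On a little-endian V4SImode target, for instance, the loop below
   builds the selector {1, 5, 3, 7}: the high halves of the even
   products in M1 and of the odd products in M2, interleaved back into
   element order. */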
6675 sel.new_vector (GET_MODE_NUNITS (mode), 2, 3);
6676 for (i = 0; i < 6; ++i)
6677 sel.quick_push (!BYTES_BIG_ENDIAN + (i & ~1)
6678 + ((i & 1) ? GET_MODE_NUNITS (mode) : 0));
6680 else
6682 /* The encoding has a single interleaved stepped pattern. */
6683 sel.new_vector (GET_MODE_NUNITS (mode), 1, 3);
6684 for (i = 0; i < 3; ++i)
6685 sel.quick_push (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
6688 return expand_vec_perm_const (mode, m1, m2, sel, BLKmode, target);
6691 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
6692 pattern. */
6694 static void
6695 find_cc_set (rtx x, const_rtx pat, void *data)
6697 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
6698 && GET_CODE (pat) == SET)
6700 rtx *p_cc_reg = (rtx *) data;
6701 gcc_assert (!*p_cc_reg);
6702 *p_cc_reg = x;
6706 /* This is a helper function for the other atomic operations. This function
6707 emits a loop that contains SEQ that iterates until a compare-and-swap
6708 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6709 a set of instructions that takes a value from OLD_REG as an input and
6710 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6711 set to the current contents of MEM. After SEQ, a compare-and-swap will
6712 attempt to update MEM with NEW_REG. The function returns true when the
6713 loop was generated successfully. */
6715 static bool
6716 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
6718 machine_mode mode = GET_MODE (mem);
6719 rtx_code_label *label;
6720 rtx cmp_reg, success, oldval;
6722 /* The loop we want to generate looks like
6724 cmp_reg = mem;
6725 label:
6726 old_reg = cmp_reg;
6727 seq;
6728 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
6729 if (success)
6730 goto label;
6732 Note that we only do the plain load from memory once. Subsequent
6733 iterations use the value loaded by the compare-and-swap pattern. */
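/* For an atomic fetch-and-add, for example, SEQ would contain the insns
   computing new_reg = old_reg + val; the loop then retries until the
   compare-and-swap succeeds in storing new_reg. */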
6735 label = gen_label_rtx ();
6736 cmp_reg = gen_reg_rtx (mode);
6738 emit_move_insn (cmp_reg, mem);
6739 emit_label (label);
6740 emit_move_insn (old_reg, cmp_reg);
6741 if (seq)
6742 emit_insn (seq);
6744 success = NULL_RTX;
6745 oldval = cmp_reg;
6746 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
6747 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
6748 MEMMODEL_RELAXED))
6749 return false;
6751 if (oldval != cmp_reg)
6752 emit_move_insn (cmp_reg, oldval);
6754 /* Mark this jump predicted not taken. */
6755 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
6756 GET_MODE (success), 1, label,
6757 profile_probability::guessed_never ());
6758 return true;
6762 /* This function tries to emit an atomic_exchange instruction. VAL is written
6763 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
6764 using TARGET if possible. */
6766 static rtx
6767 maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6769 machine_mode mode = GET_MODE (mem);
6770 enum insn_code icode;
6772 /* If the target supports the exchange directly, great. */
6773 icode = direct_optab_handler (atomic_exchange_optab, mode);
6774 if (icode != CODE_FOR_nothing)
6776 class expand_operand ops[4];
6778 create_output_operand (&ops[0], target, mode);
6779 create_fixed_operand (&ops[1], mem);
6780 create_input_operand (&ops[2], val, mode);
6781 create_integer_operand (&ops[3], model);
6782 if (maybe_expand_insn (icode, 4, ops))
6783 return ops[0].value;
6786 return NULL_RTX;
6789 /* This function tries to implement an atomic exchange operation using
6790 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
6791 The previous contents of *MEM are returned, using TARGET if possible.
6792 Since this instruction is an acquire barrier only, stronger memory
6793 models may require additional barriers to be emitted. */
6795 static rtx
6796 maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
6797 enum memmodel model)
6799 machine_mode mode = GET_MODE (mem);
6800 enum insn_code icode;
6801 rtx_insn *last_insn = get_last_insn ();
6803 icode = optab_handler (sync_lock_test_and_set_optab, mode);
6805 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
6806 exists, and the memory model is stronger than acquire, add a release
6807 barrier before the instruction. */
6809 if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
6810 expand_mem_thread_fence (model);
6812 if (icode != CODE_FOR_nothing)
6814 class expand_operand ops[3];
6815 create_output_operand (&ops[0], target, mode);
6816 create_fixed_operand (&ops[1], mem);
6817 create_input_operand (&ops[2], val, mode);
6818 if (maybe_expand_insn (icode, 3, ops))
6819 return ops[0].value;
6822 /* If an external test-and-set libcall is provided, use that instead of
6823 any external compare-and-swap that we might get from the compare-and-
6824 swap-loop expansion later. */
6825 if (!can_compare_and_swap_p (mode, false))
6827 rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
6828 if (libfunc != NULL)
6830 rtx addr;
6832 addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6833 return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6834 mode, addr, ptr_mode,
6835 val, mode);
6839 /* If the test_and_set can't be emitted, eliminate any barrier that might
6840 have been emitted. */
6841 delete_insns_since (last_insn);
6842 return NULL_RTX;
6845 /* This function tries to implement an atomic exchange operation using a
6846 compare_and_swap loop. VAL is written to *MEM. The previous contents of
6847 *MEM are returned, using TARGET if possible. No memory model is required
6848 since a compare_and_swap loop is seq-cst. */
6850 static rtx
6851 maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
6853 machine_mode mode = GET_MODE (mem);
6855 if (can_compare_and_swap_p (mode, true))
6857 if (!target || !register_operand (target, mode))
6858 target = gen_reg_rtx (mode);
6859 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6860 return target;
6863 return NULL_RTX;
6866 /* This function tries to implement an atomic test-and-set operation
6867 using the atomic_test_and_set instruction pattern. A boolean value
6868 is returned from the operation, using TARGET if possible. */
6870 static rtx
6871 maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6873 machine_mode pat_bool_mode;
6874 class expand_operand ops[3];
6876 if (!targetm.have_atomic_test_and_set ())
6877 return NULL_RTX;
6879 /* While we always get QImode from __atomic_test_and_set, we get
6880 other memory modes from __sync_lock_test_and_set. Note that we
6881 use no endian adjustment here. This matches the 4.6 behavior
6882 in the Sparc backend. */
6883 enum insn_code icode = targetm.code_for_atomic_test_and_set;
6884 gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
6885 if (GET_MODE (mem) != QImode)
6886 mem = adjust_address_nv (mem, QImode, 0);
6888 pat_bool_mode = insn_data[icode].operand[0].mode;
6889 create_output_operand (&ops[0], target, pat_bool_mode);
6890 create_fixed_operand (&ops[1], mem);
6891 create_integer_operand (&ops[2], model);
6893 if (maybe_expand_insn (icode, 3, ops))
6894 return ops[0].value;
6895 return NULL_RTX;
6898 /* This function expands the legacy __sync_lock_test_and_set operation, which
6899 is generally an atomic exchange. Some limited targets only allow the
6900 constant 1 to be stored. This is an ACQUIRE operation.
6902 TARGET is an optional place to stick the return value.
6903 MEM is where VAL is stored. */
6905 rtx
6906 expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
6908 rtx ret;
6910 /* Try an atomic_exchange first. */
6911 ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
6912 if (ret)
6913 return ret;
6915 ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
6916 MEMMODEL_SYNC_ACQUIRE);
6917 if (ret)
6918 return ret;
6920 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6921 if (ret)
6922 return ret;
6924 /* If there are no other options, try atomic_test_and_set if the value
6925 being stored is 1. */
6926 if (val == const1_rtx)
6927 ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);
6929 return ret;
6932 /* This function expands the atomic test_and_set operation:
6933 atomically store a boolean TRUE into MEM and return the previous value.
6935 MEMMODEL is the memory model variant to use.
6936 TARGET is an optional place to stick the return value. */
6938 rtx
6939 expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6941 machine_mode mode = GET_MODE (mem);
6942 rtx ret, trueval, subtarget;
6944 ret = maybe_emit_atomic_test_and_set (target, mem, model);
6945 if (ret)
6946 return ret;
6948 /* Be binary compatible with non-default settings of trueval, and different
6949 CPU revisions. E.g. one revision may have atomic-test-and-set, but
6950 another only has atomic-exchange. */
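/* For instance, if the target's trueval is 0xff, the exchange path below
   stores 0xff, and the emit_store_flag_force call at the end reduces the
   previous contents of MEM to the 0/1 boolean result the caller expects. */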
6951 if (targetm.atomic_test_and_set_trueval == 1)
6953 trueval = const1_rtx;
6954 subtarget = target ? target : gen_reg_rtx (mode);
6956 else
6958 trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
6959 subtarget = gen_reg_rtx (mode);
6962 /* Try the atomic-exchange optab... */
6963 ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);
6965 /* ... then an atomic-compare-and-swap loop ... */
6966 if (!ret)
6967 ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);
6969 /* ... before trying the vaguely defined legacy lock_test_and_set. */
6970 if (!ret)
6971 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);
6973 /* Recall that the legacy lock_test_and_set optab was allowed to do magic
6974 things with the value 1. Thus we try again without trueval. */
6975 if (!ret && targetm.atomic_test_and_set_trueval != 1)
6976 ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);
6978 /* Failing all else, assume a single threaded environment and simply
6979 perform the operation. */
6980 if (!ret)
6983 /* If the result is ignored, skip the move to target. */
6983 if (subtarget != const0_rtx)
6984 emit_move_insn (subtarget, mem);
6986 emit_move_insn (mem, trueval);
6987 ret = subtarget;
6990 /* Recall that we have to return a boolean value; rectify if trueval
6991 is not exactly one. */
6992 if (targetm.atomic_test_and_set_trueval != 1)
6993 ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);
6995 return ret;
6998 /* This function expands the atomic exchange operation:
6999 atomically store VAL in MEM and return the previous value in MEM.
7001 MEMMODEL is the memory model variant to use.
7002 TARGET is an optional place to stick the return value. */
7004 rtx
7005 expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
7007 machine_mode mode = GET_MODE (mem);
7008 rtx ret;
7010 /* If loads are not atomic for the required size and we are not called to
7011 provide a __sync builtin, do not do anything so that we stay consistent
7012 with atomic loads of the same size. */
7013 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
7014 return NULL_RTX;
7016 ret = maybe_emit_atomic_exchange (target, mem, val, model);
7018 /* Next try a compare-and-swap loop for the exchange. */
7019 if (!ret)
7020 ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
7022 return ret;
7025 /* This function expands the atomic compare exchange operation:
7027 *PTARGET_BOOL is an optional place to store the boolean success/failure.
7028 *PTARGET_OVAL is an optional place to store the old value from memory.
7029 Both target parameters may be NULL or const0_rtx to indicate that we do
7030 not care about that return value. Both target parameters are updated on
7031 success to the actual location of the corresponding result.
7033 MEMMODEL is the memory model variant to use.
7035 The return value of the function is true for success. */
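/* This is used, for example, to expand the __atomic_compare_exchange
   built-ins: *PTARGET_BOOL receives the built-in's boolean result and
   *PTARGET_OVAL the value that was read from MEM. */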
7037 bool
7038 expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
7039 rtx mem, rtx expected, rtx desired,
7040 bool is_weak, enum memmodel succ_model,
7041 enum memmodel fail_model)
7043 machine_mode mode = GET_MODE (mem);
7044 class expand_operand ops[8];
7045 enum insn_code icode;
7046 rtx target_oval, target_bool = NULL_RTX;
7047 rtx libfunc;
7049 /* If loads are not atomic for the required size and we are not called to
7050 provide a __sync builtin, do not do anything so that we stay consistent
7051 with atomic loads of the same size. */
7052 if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
7053 return false;
7055 /* Load expected into a register for the compare and swap. */
7056 if (MEM_P (expected))
7057 expected = copy_to_reg (expected);
7059 /* Make sure we always have some place to put the return oldval.
7060 Further, make sure that place is distinct from the input expected,
7061 just in case we need that path down below. */
7062 if (ptarget_oval && *ptarget_oval == const0_rtx)
7063 ptarget_oval = NULL;
7065 if (ptarget_oval == NULL
7066 || (target_oval = *ptarget_oval) == NULL
7067 || reg_overlap_mentioned_p (expected, target_oval))
7068 target_oval = gen_reg_rtx (mode);
7070 icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
7071 if (icode != CODE_FOR_nothing)
7073 machine_mode bool_mode = insn_data[icode].operand[0].mode;
7075 if (ptarget_bool && *ptarget_bool == const0_rtx)
7076 ptarget_bool = NULL;
7078 /* Make sure we always have a place for the bool operand. */
7079 if (ptarget_bool == NULL
7080 || (target_bool = *ptarget_bool) == NULL
7081 || GET_MODE (target_bool) != bool_mode)
7082 target_bool = gen_reg_rtx (bool_mode);
7084 /* Emit the compare_and_swap. */
7085 create_output_operand (&ops[0], target_bool, bool_mode);
7086 create_output_operand (&ops[1], target_oval, mode);
7087 create_fixed_operand (&ops[2], mem);
7088 create_input_operand (&ops[3], expected, mode);
7089 create_input_operand (&ops[4], desired, mode);
7090 create_integer_operand (&ops[5], is_weak);
7091 create_integer_operand (&ops[6], succ_model);
7092 create_integer_operand (&ops[7], fail_model);
7093 if (maybe_expand_insn (icode, 8, ops))
7095 /* Return success/failure. */
7096 target_bool = ops[0].value;
7097 target_oval = ops[1].value;
7098 goto success;
7102 /* Otherwise fall back to the original __sync_val_compare_and_swap
7103 which is always seq-cst. */
7104 icode = optab_handler (sync_compare_and_swap_optab, mode);
7105 if (icode != CODE_FOR_nothing)
7107 rtx cc_reg;
7109 create_output_operand (&ops[0], target_oval, mode);
7110 create_fixed_operand (&ops[1], mem);
7111 create_input_operand (&ops[2], expected, mode);
7112 create_input_operand (&ops[3], desired, mode);
7113 if (!maybe_expand_insn (icode, 4, ops))
7114 return false;
7116 target_oval = ops[0].value;
7118 /* If the caller isn't interested in the boolean return value,
7119 skip the computation of it. */
7120 if (ptarget_bool == NULL)
7121 goto success;
7123 /* Otherwise, work out if the compare-and-swap succeeded. */
7124 cc_reg = NULL_RTX;
7125 if (have_insn_for (COMPARE, CCmode))
7126 note_stores (get_last_insn (), find_cc_set, &cc_reg);
7127 if (cc_reg)
7129 target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
7130 const0_rtx, VOIDmode, 0, 1);
7131 goto success;
7133 goto success_bool_from_val;
7136 /* Also check for library support for __sync_val_compare_and_swap. */
7137 libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
7138 if (libfunc != NULL)
7140 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
7141 rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
7142 mode, addr, ptr_mode,
7143 expected, mode, desired, mode);
7144 emit_move_insn (target_oval, target);
7146 /* Compute the boolean return value only if requested. */
7147 if (ptarget_bool)
7148 goto success_bool_from_val;
7149 else
7150 goto success;
7153 /* Failure. */
7154 return false;
7156 success_bool_from_val:
7157 target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
7158 expected, VOIDmode, 1, 1);
7159 success:
7160 /* Make sure that the oval output winds up where the caller asked. */
7161 if (ptarget_oval)
7162 *ptarget_oval = target_oval;
7163 if (ptarget_bool)
7164 *ptarget_bool = target_bool;
7165 return true;
7168 /* Generate asm volatile("" : : : "memory") as the memory blockage. */
7170 static void
7171 expand_asm_memory_blockage (void)
7173 rtx asm_op, clob;
7175 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
7176 rtvec_alloc (0), rtvec_alloc (0),
7177 rtvec_alloc (0), UNKNOWN_LOCATION);
7178 MEM_VOLATILE_P (asm_op) = 1;
7180 clob = gen_rtx_SCRATCH (VOIDmode);
7181 clob = gen_rtx_MEM (BLKmode, clob);
7182 clob = gen_rtx_CLOBBER (VOIDmode, clob);
7184 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
7187 /* Do not propagate memory accesses across this point. */
7189 static void
7190 expand_memory_blockage (void)
7192 if (targetm.have_memory_blockage ())
7193 emit_insn (targetm.gen_memory_blockage ());
7194 else
7195 expand_asm_memory_blockage ();
7198 /* Generate asm volatile("" : : : "memory") as a memory blockage, at the
7199 same time clobbering the register set specified by REGS. */
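/* For a set containing hard registers 0 and 1, for example, this emits
   (parallel [(asm_operands ...) (clobber (mem:BLK (scratch)))
   (clobber (reg 0)) (clobber (reg 1))]). */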
7201 void
7202 expand_asm_reg_clobber_mem_blockage (HARD_REG_SET regs)
7204 rtx asm_op, clob_mem;
7206 unsigned int num_of_regs = 0;
7207 for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7208 if (TEST_HARD_REG_BIT (regs, i))
7209 num_of_regs++;
7211 asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
7212 rtvec_alloc (0), rtvec_alloc (0),
7213 rtvec_alloc (0), UNKNOWN_LOCATION);
7214 MEM_VOLATILE_P (asm_op) = 1;
7216 rtvec v = rtvec_alloc (num_of_regs + 2);
7218 clob_mem = gen_rtx_SCRATCH (VOIDmode);
7219 clob_mem = gen_rtx_MEM (BLKmode, clob_mem);
7220 clob_mem = gen_rtx_CLOBBER (VOIDmode, clob_mem);
7222 RTVEC_ELT (v, 0) = asm_op;
7223 RTVEC_ELT (v, 1) = clob_mem;
7225 if (num_of_regs > 0)
7227 unsigned int j = 2;
7228 for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7229 if (TEST_HARD_REG_BIT (regs, i))
7231 RTVEC_ELT (v, j) = gen_rtx_CLOBBER (VOIDmode, regno_reg_rtx[i]);
7232 j++;
7234 gcc_assert (j == (num_of_regs + 2));
7237 emit_insn (gen_rtx_PARALLEL (VOIDmode, v));
7240 /* This routine will either emit the mem_thread_fence pattern or issue a
7241 sync_synchronize to generate a fence for memory model MEMMODEL. */
7243 void
7244 expand_mem_thread_fence (enum memmodel model)
7246 if (is_mm_relaxed (model))
7247 return;
7248 if (targetm.have_mem_thread_fence ())
7250 emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
7251 expand_memory_blockage ();
7253 else if (targetm.have_memory_barrier ())
7254 emit_insn (targetm.gen_memory_barrier ());
7255 else if (synchronize_libfunc != NULL_RTX)
7256 emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode);
7257 else
7258 expand_memory_blockage ();
7261 /* Emit a signal fence with given memory model. */
7263 void
7264 expand_mem_signal_fence (enum memmodel model)
7266 /* No machine barrier is required to implement a signal fence, but
7267 a compiler memory barrier must be issued, except for relaxed MM. */
7268 if (!is_mm_relaxed (model))
7269 expand_memory_blockage ();
7272 /* This function expands the atomic load operation:
7273 return the atomically loaded value in MEM.
7275 MEMMODEL is the memory model variant to use.
7276 TARGET is an optional place to stick the return value. */
7278 rtx
7279 expand_atomic_load (rtx target, rtx mem, enum memmodel model)
7281 machine_mode mode = GET_MODE (mem);
7282 enum insn_code icode;
7284 /* If the target supports the load directly, great. */
7285 icode = direct_optab_handler (atomic_load_optab, mode);
7286 if (icode != CODE_FOR_nothing)
7288 class expand_operand ops[3];
7289 rtx_insn *last = get_last_insn ();
7290 if (is_mm_seq_cst (model))
7291 expand_memory_blockage ();
7293 create_output_operand (&ops[0], target, mode);
7294 create_fixed_operand (&ops[1], mem);
7295 create_integer_operand (&ops[2], model);
7296 if (maybe_expand_insn (icode, 3, ops))
7298 if (!is_mm_relaxed (model))
7299 expand_memory_blockage ();
7300 return ops[0].value;
7302 delete_insns_since (last);
7305 /* If the size of the object is greater than word size on this target,
7306 then we assume that a load will not be atomic. We could try to
7307 emulate a load with a compare-and-swap operation, but the store that
7308 operation performs would be wrong if this is a volatile atomic load
7309 or the load targets read-only-mapped memory. */
7310 if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
7311 /* If there is no atomic load, leave the library call. */
7312 return NULL_RTX;
7314 /* Otherwise assume loads are atomic, and emit the proper barriers. */
7315 if (!target || target == const0_rtx)
7316 target = gen_reg_rtx (mode);
7318 /* For SEQ_CST, emit a barrier before the load. */
7319 if (is_mm_seq_cst (model))
7320 expand_mem_thread_fence (model);
7322 emit_move_insn (target, mem);
7324 /* Emit the appropriate barrier after the load. */
7325 expand_mem_thread_fence (model);
7327 return target;
7330 /* This function expands the atomic store operation:
7331 Atomically store VAL in MEM.
7332 MEMMODEL is the memory model variant to use.
7333 USE_RELEASE is true if __sync_lock_release can be used as a fallback.
7334 The function returns const0_rtx if a pattern was emitted. */
7336 rtx
7337 expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
7339 machine_mode mode = GET_MODE (mem);
7340 enum insn_code icode;
7341 class expand_operand ops[3];
7343 /* If the target supports the store directly, great. */
7344 icode = direct_optab_handler (atomic_store_optab, mode);
7345 if (icode != CODE_FOR_nothing)
7347 rtx_insn *last = get_last_insn ();
7348 if (!is_mm_relaxed (model))
7349 expand_memory_blockage ();
7350 create_fixed_operand (&ops[0], mem);
7351 create_input_operand (&ops[1], val, mode);
7352 create_integer_operand (&ops[2], model);
7353 if (maybe_expand_insn (icode, 3, ops))
7355 if (is_mm_seq_cst (model))
7356 expand_memory_blockage ();
7357 return const0_rtx;
7359 delete_insns_since (last);
7362 /* If using __sync_lock_release is a viable alternative, try it.
7363 Note that this will not be set to true if we are expanding a generic
7364 __atomic_store_n. */
7365 if (use_release)
7367 icode = direct_optab_handler (sync_lock_release_optab, mode);
7368 if (icode != CODE_FOR_nothing)
7370 create_fixed_operand (&ops[0], mem);
7371 create_input_operand (&ops[1], const0_rtx, mode);
7372 if (maybe_expand_insn (icode, 2, ops))
7374 /* lock_release is only a release barrier. */
7375 if (is_mm_seq_cst (model))
7376 expand_mem_thread_fence (model);
7377 return const0_rtx;
7382 /* If the size of the object is greater than word size on this target,
7383 a default store will not be atomic. */
7384 if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
7386 /* If loads are atomic or we are called to provide a __sync builtin,
7387 we can try an atomic_exchange and throw away the result. Otherwise,
7388 don't do anything so that we do not create an inconsistency between
7389 loads and stores. */
7390 if (can_atomic_load_p (mode) || is_mm_sync (model))
7392 rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
7393 if (!target)
7394 target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
7395 val);
7396 if (target)
7397 return const0_rtx;
7399 return NULL_RTX;
7402 /* Otherwise assume stores are atomic, and emit the proper barriers. */
7403 expand_mem_thread_fence (model);
7405 emit_move_insn (mem, val);
7407 /* For SEQ_CST, also emit a barrier after the store. */
7408 if (is_mm_seq_cst (model))
7409 expand_mem_thread_fence (model);
7411 return const0_rtx;
7415 /* Structure containing the pointers and values required to process the
7416 various forms of the atomic_fetch_op and atomic_op_fetch builtins. */
7418 struct atomic_op_functions
7420 direct_optab mem_fetch_before;
7421 direct_optab mem_fetch_after;
7422 direct_optab mem_no_result;
7423 optab fetch_before;
7424 optab fetch_after;
7425 direct_optab no_result;
7426 enum rtx_code reverse_code;
7430 /* Fill in structure pointed to by OP with the various optab entries for an
7431 operation of type CODE. */
7433 static void
7434 get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
7436 gcc_assert (op != NULL);
7438 /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
7439 in the source code during compilation, and the optab entries are not
7440 computable until runtime. Fill in the values at runtime. */
7441 switch (code)
7443 case PLUS:
7444 op->mem_fetch_before = atomic_fetch_add_optab;
7445 op->mem_fetch_after = atomic_add_fetch_optab;
7446 op->mem_no_result = atomic_add_optab;
7447 op->fetch_before = sync_old_add_optab;
7448 op->fetch_after = sync_new_add_optab;
7449 op->no_result = sync_add_optab;
7450 op->reverse_code = MINUS;
7451 break;
7452 case MINUS:
7453 op->mem_fetch_before = atomic_fetch_sub_optab;
7454 op->mem_fetch_after = atomic_sub_fetch_optab;
7455 op->mem_no_result = atomic_sub_optab;
7456 op->fetch_before = sync_old_sub_optab;
7457 op->fetch_after = sync_new_sub_optab;
7458 op->no_result = sync_sub_optab;
7459 op->reverse_code = PLUS;
7460 break;
7461 case XOR:
7462 op->mem_fetch_before = atomic_fetch_xor_optab;
7463 op->mem_fetch_after = atomic_xor_fetch_optab;
7464 op->mem_no_result = atomic_xor_optab;
7465 op->fetch_before = sync_old_xor_optab;
7466 op->fetch_after = sync_new_xor_optab;
7467 op->no_result = sync_xor_optab;
7468 op->reverse_code = XOR;
7469 break;
7470 case AND:
7471 op->mem_fetch_before = atomic_fetch_and_optab;
7472 op->mem_fetch_after = atomic_and_fetch_optab;
7473 op->mem_no_result = atomic_and_optab;
7474 op->fetch_before = sync_old_and_optab;
7475 op->fetch_after = sync_new_and_optab;
7476 op->no_result = sync_and_optab;
7477 op->reverse_code = UNKNOWN;
7478 break;
7479 case IOR:
7480 op->mem_fetch_before = atomic_fetch_or_optab;
7481 op->mem_fetch_after = atomic_or_fetch_optab;
7482 op->mem_no_result = atomic_or_optab;
7483 op->fetch_before = sync_old_ior_optab;
7484 op->fetch_after = sync_new_ior_optab;
7485 op->no_result = sync_ior_optab;
7486 op->reverse_code = UNKNOWN;
7487 break;
7488 case NOT:
7489 op->mem_fetch_before = atomic_fetch_nand_optab;
7490 op->mem_fetch_after = atomic_nand_fetch_optab;
7491 op->mem_no_result = atomic_nand_optab;
7492 op->fetch_before = sync_old_nand_optab;
7493 op->fetch_after = sync_new_nand_optab;
7494 op->no_result = sync_nand_optab;
7495 op->reverse_code = UNKNOWN;
7496 break;
7497 default:
7498 gcc_unreachable ();
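
/* Usage sketch (hypothetical caller, for illustration only):

       struct atomic_op_functions op;
       get_atomic_op_for_code (&op, PLUS);

   leaves op.mem_fetch_before == atomic_fetch_add_optab and
   op.reverse_code == MINUS, i.e. an add_fetch (new value) result can be
   converted to the fetched (old) value by subtracting VAL.  */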
7502 /* See if there is a more efficient way to implement the operation "*MEM CODE VAL"
7503 using memory order MODEL. If AFTER is true the operation needs to return
7504 the value of *MEM after the operation, otherwise the previous value.
7505 TARGET is an optional place to place the result. The result is unused if
7506 it is const0_rtx.
7507 Return the result if there is a better sequence, otherwise NULL_RTX. */
7509 static rtx
7510 maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
7511 enum memmodel model, bool after)
7513 /* If the value is prefetched, or not used, it may be possible to replace
7514 the sequence with a native exchange operation. */
7515 if (!after || target == const0_rtx)
7517 /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */
7518 if (code == AND && val == const0_rtx)
7520 if (target == const0_rtx)
7521 target = gen_reg_rtx (GET_MODE (mem));
7522 return maybe_emit_atomic_exchange (target, mem, val, model);
7525 /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */
7526 if (code == IOR && val == constm1_rtx)
7528 if (target == const0_rtx)
7529 target = gen_reg_rtx (GET_MODE (mem));
7530 return maybe_emit_atomic_exchange (target, mem, val, model);
7534 return NULL_RTX;
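
/* The two rewrites above rely on simple identities (illustration only):

       atomic_fetch_and (&x, 0, m)   ==  atomic_exchange (&x, 0, m)
       atomic_fetch_or  (&x, -1, m)  ==  atomic_exchange (&x, -1, m)

   since x & 0 is always 0 and x | -1 is always -1, the stored value does
   not depend on the old contents and a plain exchange suffices.  */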
7537 /* Try to emit an instruction for a specific operation variation.
7538 OPTAB contains the OP functions.
7539 TARGET is an optional place to return the result. const0_rtx means unused.
7540 MEM is the memory location to operate on.
7541 VAL is the value to use in the operation.
7542 USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
7543 MODEL is the memory model, if used.
7544 AFTER is true if the returned result is the value after the operation. */
7546 static rtx
7547 maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
7548 rtx val, bool use_memmodel, enum memmodel model, bool after)
7550 machine_mode mode = GET_MODE (mem);
7551 class expand_operand ops[4];
7552 enum insn_code icode;
7553 int op_counter = 0;
7554 int num_ops;
7556 /* Check to see if there is a result returned. */
7557 if (target == const0_rtx)
7559 if (use_memmodel)
7561 icode = direct_optab_handler (optab->mem_no_result, mode);
7562 create_integer_operand (&ops[2], model);
7563 num_ops = 3;
7565 else
7567 icode = direct_optab_handler (optab->no_result, mode);
7568 num_ops = 2;
7571 /* Otherwise, we need to generate a result. */
7572 else
7574 if (use_memmodel)
7576 icode = direct_optab_handler (after ? optab->mem_fetch_after
7577 : optab->mem_fetch_before, mode);
7578 create_integer_operand (&ops[3], model);
7579 num_ops = 4;
7581 else
7583 icode = optab_handler (after ? optab->fetch_after
7584 : optab->fetch_before, mode);
7585 num_ops = 3;
7587 create_output_operand (&ops[op_counter++], target, mode);
7589 if (icode == CODE_FOR_nothing)
7590 return NULL_RTX;
7592 create_fixed_operand (&ops[op_counter++], mem);
7593 /* VAL may have been promoted to a wider mode. Shrink it if so. */
7594 create_convert_operand_to (&ops[op_counter++], val, mode, true);
7596 if (maybe_expand_insn (icode, num_ops, ops))
7597 return (target == const0_rtx ? const0_rtx : ops[0].value);
7599 return NULL_RTX;
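
/* For reference, the operand layouts chosen above (illustrative):

       mem_no_result:   { mem, val, model }          3 operands
       no_result:       { mem, val }                 2 operands
       mem_fetch_*:     { target, mem, val, model }  4 operands
       fetch_*:         { target, mem, val }         3 operands  */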
7603 /* This function expands an atomic fetch_OP or OP_fetch operation:
7604 TARGET is an optional place to stick the return value. const0_rtx indicates
7605 the result is unused.
7606 Atomically fetch MEM, perform the operation with VAL, and store the result back in MEM.
7607 CODE is the operation being performed (OP).
7608 MEMMODEL is the memory model variant to use.
7609 AFTER is true to return the result of the operation (OP_fetch).
7610 AFTER is false to return the value before the operation (fetch_OP).
7612 This function will *only* generate instructions if there is a direct
7613 optab. No compare and swap loops or libcalls will be generated. */
7615 static rtx
7616 expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
7617 enum rtx_code code, enum memmodel model,
7618 bool after)
7620 machine_mode mode = GET_MODE (mem);
7621 struct atomic_op_functions optab;
7622 rtx result;
7623 bool unused_result = (target == const0_rtx);
7625 get_atomic_op_for_code (&optab, code);
7627 /* Check to see if there are any better instructions. */
7628 result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
7629 if (result)
7630 return result;
7632 /* Check for the case where the result isn't used and try those patterns. */
7633 if (unused_result)
7635 /* Try the memory model variant first. */
7636 result = maybe_emit_op (&optab, target, mem, val, true, model, true);
7637 if (result)
7638 return result;
7640 /* Next try the old style without a memory model. */
7641 result = maybe_emit_op (&optab, target, mem, val, false, model, true);
7642 if (result)
7643 return result;
7645 /* There is no no-result pattern, so try patterns with a result. */
7646 target = NULL_RTX;
7649 /* Try the __atomic version. */
7650 result = maybe_emit_op (&optab, target, mem, val, true, model, after);
7651 if (result)
7652 return result;
7654 /* Try the older __sync version. */
7655 result = maybe_emit_op (&optab, target, mem, val, false, model, after);
7656 if (result)
7657 return result;
7659 /* If the fetch value can be calculated from the other variation of fetch,
7660 try that operation. */
7661 if (after || unused_result || optab.reverse_code != UNKNOWN)
7663 /* Try the __atomic version, then the older __sync version. */
7664 result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
7665 if (!result)
7666 result = maybe_emit_op (&optab, target, mem, val, false, model, !after);
7668 if (result)
7670 /* If the result isn't used, no need to do compensation code. */
7671 if (unused_result)
7672 return result;
7674 /* Issue compensation code. fetch_after == fetch_before OP val;
7675 fetch_before == fetch_after REVERSE_OP val. */
7676 if (!after)
7677 code = optab.reverse_code;
7678 if (code == NOT)
7680 result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
7681 true, OPTAB_LIB_WIDEN);
7682 result = expand_simple_unop (mode, NOT, result, target, true);
7684 else
7685 result = expand_simple_binop (mode, code, result, val, target,
7686 true, OPTAB_LIB_WIDEN);
7687 return result;
7691 /* No direct opcode can be generated. */
7692 return NULL_RTX;
7697 /* This function expands an atomic fetch_OP or OP_fetch operation:
7698 TARGET is an optional place to stick the return value. const0_rtx indicates
7699 the result is unused.
7700 Atomically fetch MEM, perform the operation with VAL, and store the result back in MEM.
7701 CODE is the operation being performed (OP).
7702 MEMMODEL is the memory model variant to use.
7703 AFTER is true to return the result of the operation (OP_fetch).
7704 AFTER is false to return the value before the operation (fetch_OP). */
7705 rtx
7706 expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
7707 enum memmodel model, bool after)
7709 machine_mode mode = GET_MODE (mem);
7710 rtx result;
7711 bool unused_result = (target == const0_rtx);
7713 /* If loads are not atomic for the required size and we are not called to
7714 provide a __sync builtin, do not do anything so that we stay consistent
7715 with atomic loads of the same size. */
7716 if (!can_atomic_load_p (mode) && !is_mm_sync (model))
7717 return NULL_RTX;
7719 result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
7720 after);
7722 if (result)
7723 return result;
7725 /* Add/sub can be implemented by doing the reverse operation with -(val). */
7726 if (code == PLUS || code == MINUS)
7728 rtx tmp;
7729 enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);
7731 start_sequence ();
7732 tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
7733 result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
7734 model, after);
7735 if (result)
7737 /* The reversed operation worked, so emit the insns and return. */
7738 tmp = get_insns ();
7739 end_sequence ();
7740 emit_insn (tmp);
7741 return result;
7744 /* The reversed operation failed too, so throw away the negation code and continue. */
7745 end_sequence ();
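
  /* Example of the rewrite above (illustration only):

         atomic_fetch_sub (mem, val, model)

     is retried as

         atomic_fetch_add (mem, -val, model)

     with the negation kept in a pending sequence so that it can be thrown
     away if the reversed operation is not supported either.  */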
7748 /* Try the __sync libcalls only if we can't do compare-and-swap inline. */
7749 if (!can_compare_and_swap_p (mode, false))
7751 rtx libfunc;
7752 bool fixup = false;
7753 enum rtx_code orig_code = code;
7754 struct atomic_op_functions optab;
7756 get_atomic_op_for_code (&optab, code);
7757 libfunc = optab_libfunc (after ? optab.fetch_after
7758 : optab.fetch_before, mode);
7759 if (libfunc == NULL
7760 && (after || unused_result || optab.reverse_code != UNKNOWN))
7762 fixup = true;
7763 if (!after)
7764 code = optab.reverse_code;
7765 libfunc = optab_libfunc (after ? optab.fetch_before
7766 : optab.fetch_after, mode);
7768 if (libfunc != NULL)
7770 rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
7771 result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
7772 addr, ptr_mode, val, mode);
7774 if (!unused_result && fixup)
7775 result = expand_simple_binop (mode, code, result, val, target,
7776 true, OPTAB_LIB_WIDEN);
7777 return result;
7780 /* We need the original code for any further attempts. */
7781 code = orig_code;
7784 /* If nothing else has succeeded, default to a compare and swap loop. */
7785 if (can_compare_and_swap_p (mode, true))
7787 rtx_insn *insn;
7788 rtx t0 = gen_reg_rtx (mode), t1;
7790 start_sequence ();
7792 /* If the result is used, get a register for it. */
7793 if (!unused_result)
7795 if (!target || !register_operand (target, mode))
7796 target = gen_reg_rtx (mode);
7797 /* If fetch_before, copy the value now. */
7798 if (!after)
7799 emit_move_insn (target, t0);
7801 else
7802 target = const0_rtx;
7804 t1 = t0;
7805 if (code == NOT)
7807 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
7808 true, OPTAB_LIB_WIDEN);
7809 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
7811 else
7812 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
7813 OPTAB_LIB_WIDEN);
7815 /* For after, copy the value now. */
7816 if (!unused_result && after)
7817 emit_move_insn (target, t1);
7818 insn = get_insns ();
7819 end_sequence ();
7821 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7822 return target;
7825 return NULL_RTX;
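
/* The final fallback above is a compare-and-swap loop, roughly
   (illustration only):

       t0 = *mem;
       do
         {
           if (!after) target = t0;   // fetch_OP returns the old value
           t1 = t0 OP val;            // for NOT: t1 = ~(t0 & val)
           if (after) target = t1;    // OP_fetch returns the new value
         }
       while (!compare_and_swap (mem, &t0, t1));

   expand_compare_and_swap_loop supplies the loop control and the CAS.  */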
7828 /* Return true if OPERAND is suitable for operand number OPNO of
7829 instruction ICODE. */
7831 bool
7832 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
7834 return (!insn_data[(int) icode].operand[opno].predicate
7835 || (insn_data[(int) icode].operand[opno].predicate
7836 (operand, insn_data[(int) icode].operand[opno].mode)));
7839 /* TARGET is a target of a multiword operation that we are going to
7840 implement as a series of word-mode operations. Return true if
7841 TARGET is suitable for this purpose. */
7843 bool
7844 valid_multiword_target_p (rtx target)
7846 machine_mode mode;
7847 int i, size;
7849 mode = GET_MODE (target);
7850 if (!GET_MODE_SIZE (mode).is_constant (&size))
7851 return false;
7852 for (i = 0; i < size; i += UNITS_PER_WORD)
7853 if (!validate_subreg (word_mode, mode, target, i))
7854 return false;
7855 return true;
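
/* Example (illustration only): on a 32-bit target with UNITS_PER_WORD == 4,
   a DImode TARGET is valid only if both word-sized pieces are addressable
   as subregs, i.e. validate_subreg (SImode, DImode, target, 0) and
   validate_subreg (SImode, DImode, target, 4) both hold.  */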
7858 /* Make OP describe an input operand that has value INTVAL and that has
7859 no inherent mode. This function should only be used for operands that
7860 are always expand-time constants. The backend may request that INTVAL
7861 be copied into a different kind of rtx, but it must specify the mode
7862 of that rtx if so. */
7864 void
7865 create_integer_operand (class expand_operand *op, poly_int64 intval)
7867 create_expand_operand (op, EXPAND_INTEGER,
7868 gen_int_mode (intval, MAX_MODE_INT),
7869 VOIDmode, false, intval);
7872 /* Like maybe_legitimize_operand, but do not change the code of the
7873 current rtx value. */
7875 static bool
7876 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
7877 class expand_operand *op)
7879 /* See if the operand matches in its current form. */
7880 if (insn_operand_matches (icode, opno, op->value))
7881 return true;
7883 /* If the operand is a memory whose address has no side effects,
7884 try forcing the address into a non-virtual pseudo register.
7885 The check for side effects is important because copy_to_mode_reg
7886 cannot handle things like auto-modified addresses. */
7887 if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
7889 rtx addr, mem;
7891 mem = op->value;
7892 addr = XEXP (mem, 0);
7893 if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
7894 && !side_effects_p (addr))
7896 rtx_insn *last;
7897 machine_mode mode;
7899 last = get_last_insn ();
7900 mode = get_address_mode (mem);
7901 mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
7902 if (insn_operand_matches (icode, opno, mem))
7904 op->value = mem;
7905 return true;
7907 delete_insns_since (last);
7911 return false;
7914 /* Try to make OP match operand OPNO of instruction ICODE. Return true
7915 on success, storing the new operand value back in OP. */
7917 static bool
7918 maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
7919 class expand_operand *op)
7921 machine_mode mode, imode, tmode;
7923 mode = op->mode;
7924 switch (op->type)
7926 case EXPAND_FIXED:
7928 temporary_volatile_ok v (true);
7929 return maybe_legitimize_operand_same_code (icode, opno, op);
7932 case EXPAND_OUTPUT:
7933 gcc_assert (mode != VOIDmode);
7934 if (op->value
7935 && op->value != const0_rtx
7936 && GET_MODE (op->value) == mode
7937 && maybe_legitimize_operand_same_code (icode, opno, op))
7938 return true;
7940 op->value = gen_reg_rtx (mode);
7941 op->target = 0;
7942 break;
7944 case EXPAND_INPUT:
7945 input:
7946 gcc_assert (mode != VOIDmode);
7947 gcc_assert (GET_MODE (op->value) == VOIDmode
7948 || GET_MODE (op->value) == mode);
7949 if (maybe_legitimize_operand_same_code (icode, opno, op))
7950 return true;
7952 op->value = copy_to_mode_reg (mode, op->value);
7953 break;
7955 case EXPAND_CONVERT_TO:
7956 gcc_assert (mode != VOIDmode);
7957 op->value = convert_to_mode (mode, op->value, op->unsigned_p);
7958 goto input;
7960 case EXPAND_CONVERT_FROM:
7961 if (GET_MODE (op->value) != VOIDmode)
7962 mode = GET_MODE (op->value);
7963 else
7964 /* The caller must tell us what mode this value has. */
7965 gcc_assert (mode != VOIDmode);
7967 imode = insn_data[(int) icode].operand[opno].mode;
7968 tmode = (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode)
7969 ? GET_MODE_INNER (imode) : imode);
7970 if (tmode != VOIDmode && tmode != mode)
7972 op->value = convert_modes (tmode, mode, op->value, op->unsigned_p);
7973 mode = tmode;
7975 if (imode != VOIDmode && imode != mode)
7977 gcc_assert (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode));
7978 op->value = expand_vector_broadcast (imode, op->value);
7979 mode = imode;
7981 goto input;
7983 case EXPAND_ADDRESS:
7984 op->value = convert_memory_address (as_a <scalar_int_mode> (mode),
7985 op->value);
7986 goto input;
7988 case EXPAND_INTEGER:
7989 mode = insn_data[(int) icode].operand[opno].mode;
7990 if (mode != VOIDmode
7991 && known_eq (trunc_int_for_mode (op->int_value, mode),
7992 op->int_value))
7994 op->value = gen_int_mode (op->int_value, mode);
7995 goto input;
7997 break;
7999 return insn_operand_matches (icode, opno, op->value);
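
/* Summary of the conversions above (illustrative): EXPAND_CONVERT_TO
   converts the value to the mode the caller stated; EXPAND_CONVERT_FROM
   converts from the value's own mode to the mode the pattern wants,
   broadcasting a scalar into a vector operand if necessary; and
   EXPAND_INTEGER materializes the immediate only if it is unchanged by
   truncation to the operand's mode.  */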
8002 /* Make OP describe an input operand that should have the same value
8003 as VALUE, after any mode conversion that the target might request.
8004 TYPE is the type of VALUE. */
8006 void
8007 create_convert_operand_from_type (class expand_operand *op,
8008 rtx value, tree type)
8010 create_convert_operand_from (op, value, TYPE_MODE (type),
8011 TYPE_UNSIGNED (type));
8014 /* Return true if the requirements on operands OP1 and OP2 of instruction
8015 ICODE are similar enough for the result of legitimizing OP1 to be
8016 reusable for OP2. OPNO1 and OPNO2 are the operand numbers associated
8017 with OP1 and OP2 respectively. */
8019 static inline bool
8020 can_reuse_operands_p (enum insn_code icode,
8021 unsigned int opno1, unsigned int opno2,
8022 const class expand_operand *op1,
8023 const class expand_operand *op2)
8025 /* Check requirements that are common to all types. */
8026 if (op1->type != op2->type
8027 || op1->mode != op2->mode
8028 || (insn_data[(int) icode].operand[opno1].mode
8029 != insn_data[(int) icode].operand[opno2].mode))
8030 return false;
8032 /* Check the requirements for specific types. */
8033 switch (op1->type)
8035 case EXPAND_OUTPUT:
8036 /* Outputs must remain distinct. */
8037 return false;
8039 case EXPAND_FIXED:
8040 case EXPAND_INPUT:
8041 case EXPAND_ADDRESS:
8042 case EXPAND_INTEGER:
8043 return true;
8045 case EXPAND_CONVERT_TO:
8046 case EXPAND_CONVERT_FROM:
8047 return op1->unsigned_p == op2->unsigned_p;
8049 gcc_unreachable ();
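
/* Example (illustration only): if two input operands of the same mode were
   built from rtx-equal values, maybe_legitimize_operands below legitimizes
   the first one and copies the result into the second, so operands that
   were tied on entry stay tied in the emitted pattern.  */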
8052 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
8053 of instruction ICODE. Return true on success, leaving the new operand
8054 values in the OPS themselves. Emit no code on failure. */
8056 bool
8057 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
8058 unsigned int nops, class expand_operand *ops)
8060 rtx_insn *last = get_last_insn ();
8061 rtx *orig_values = XALLOCAVEC (rtx, nops);
8062 for (unsigned int i = 0; i < nops; i++)
8064 orig_values[i] = ops[i].value;
8066 /* First try reusing the result of an earlier legitimization.
8067 This avoids duplicate rtl and ensures that tied operands
8068 remain tied.
8070 This search is linear, but NOPS is bounded at compile time
8071 to a small number (currently a single digit). */
8072 unsigned int j = 0;
8073 for (; j < i; ++j)
8074 if (can_reuse_operands_p (icode, opno + j, opno + i, &ops[j], &ops[i])
8075 && rtx_equal_p (orig_values[j], orig_values[i])
8076 && ops[j].value
8077 && insn_operand_matches (icode, opno + i, ops[j].value))
8079 ops[i].value = copy_rtx (ops[j].value);
8080 break;
8083 /* Otherwise try legitimizing the operand on its own. */
8084 if (j == i && !maybe_legitimize_operand (icode, opno + i, &ops[i]))
8086 delete_insns_since (last);
8087 return false;
8090 return true;
8093 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
8094 as its operands. Return the instruction pattern on success,
8095 and emit any necessary set-up code. Return null and emit no
8096 code on failure. */
8098 rtx_insn *
8099 maybe_gen_insn (enum insn_code icode, unsigned int nops,
8100 class expand_operand *ops)
8102 gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
8103 if (!maybe_legitimize_operands (icode, 0, nops, ops))
8104 return NULL;
8106 switch (nops)
8108 case 0:
8109 return GEN_FCN (icode) ();
8110 case 1:
8111 return GEN_FCN (icode) (ops[0].value);
8112 case 2:
8113 return GEN_FCN (icode) (ops[0].value, ops[1].value);
8114 case 3:
8115 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
8116 case 4:
8117 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8118 ops[3].value);
8119 case 5:
8120 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8121 ops[3].value, ops[4].value);
8122 case 6:
8123 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8124 ops[3].value, ops[4].value, ops[5].value);
8125 case 7:
8126 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8127 ops[3].value, ops[4].value, ops[5].value,
8128 ops[6].value);
8129 case 8:
8130 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8131 ops[3].value, ops[4].value, ops[5].value,
8132 ops[6].value, ops[7].value);
8133 case 9:
8134 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8135 ops[3].value, ops[4].value, ops[5].value,
8136 ops[6].value, ops[7].value, ops[8].value);
8137 case 10:
8138 return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
8139 ops[3].value, ops[4].value, ops[5].value,
8140 ops[6].value, ops[7].value, ops[8].value,
8141 ops[9].value);
8143 gcc_unreachable ();
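
/* Usage sketch (hypothetical, for illustration only):

       class expand_operand ops[2];
       create_output_operand (&ops[0], target, mode);
       create_input_operand (&ops[1], src, mode);
       rtx_insn *pat = maybe_gen_insn (icode, 2, ops);
       if (pat)
         emit_insn (pat);

   which is exactly the pattern maybe_expand_insn below encapsulates.  */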
8146 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
8147 as its operands. Return true on success and emit no code on failure. */
8149 bool
8150 maybe_expand_insn (enum insn_code icode, unsigned int nops,
8151 class expand_operand *ops)
8153 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
8154 if (pat)
8156 emit_insn (pat);
8157 return true;
8159 return false;
8162 /* Like maybe_expand_insn, but for jumps. */
8164 bool
8165 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
8166 class expand_operand *ops)
8168 rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
8169 if (pat)
8171 emit_jump_insn (pat);
8172 return true;
8174 return false;
8177 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
8178 as its operands. */
8180 void
8181 expand_insn (enum insn_code icode, unsigned int nops,
8182 class expand_operand *ops)
8184 if (!maybe_expand_insn (icode, nops, ops))
8185 gcc_unreachable ();
8188 /* Like expand_insn, but for jumps. */
8190 void
8191 expand_jump_insn (enum insn_code icode, unsigned int nops,
8192 class expand_operand *ops)
8194 if (!maybe_expand_jump_insn (icode, nops, ops))
8195 gcc_unreachable ();